/*!
 *****************************************************************************
 *
 * @File imgmmu.c
 * @Description Implementation of the MMU functions
 * ---------------------------------------------------------------------------
 *
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "mmulib/mmu.h"
#include "mmulib/heap.h" /* for struct imgmmu_halloc */

/*-----------------------------------------------------------------------------
 * Following elements are in the IMGMMU_lib_int module
 *---------------------------------------------------------------------------*/

/* access to MMU info and error printing function */
#include "mmu_defs.h"

#include <asm/page.h>

static int pte_cache_mode;
module_param(pte_cache_mode, int, 0444);
MODULE_PARM_DESC(pte_cache_mode,
	"PTE ax_cache signals. Acceptable values: <0-15>, refer to MMUv3 spec.");

static bool pte_rb_check = true;
module_param(pte_rb_check, bool, 0444);
MODULE_PARM_DESC(pte_rb_check,
	"Enables PTE read-back checks");
/** variable page shift */
static size_t g_mmupageshift = IMGMMU_PAGE_SHIFT;

/* Page table index mask in virtual address - low bits */
static uint64_t VIRT_PAGE_TBL_MASK(void)
{
	return ((((1ULL << IMGMMU_CAT_SHIFT) - 1) &
		 ((1ULL << IMGMMU_DIR_SHIFT) - 1)) &
		~(((1ULL << g_mmupageshift) - 1)));
}

/* Directory index mask in virtual address - middle bits */
static const uint64_t VIRT_DIR_IDX_MASK
	= (((1ULL << IMGMMU_CAT_SHIFT) - 1) & ~((1ULL << IMGMMU_DIR_SHIFT) - 1));

/* Catalogue index mask in virtual address - high bits */
static const uint64_t VIRT_CAT_IDX_MASK = (~((1ULL << IMGMMU_CAT_SHIFT) - 1));
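
/*
 * A device virtual address is decoded in three levels: the bits above
 * IMGMMU_CAT_SHIFT select the catalogue entry (PCE), the bits between
 * IMGMMU_CAT_SHIFT and IMGMMU_DIR_SHIFT select the directory entry (PDE),
 * and the bits between IMGMMU_DIR_SHIFT and the (variable) page shift
 * select the page table entry (PTE). For example, assuming the default
 * 4 KB pages with IMGMMU_DIR_SHIFT == 22 and IMGMMU_CAT_SHIFT == 32,
 * vaddr 0x1_2345_6000 decodes to PCE 0x1, PDE 0x8D and PTE 0x56.
 */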
/*
 * Catalogue entry in the MMU - contains up to 1024 directory mappings
 */
struct imgmmu_cat {
	/* Physical page used for the catalogue entries */
	struct imgmmu_page *page;
	/* All the page directory structures in
	 * a static array of pointers
	 */
	struct imgmmu_dir **dir_map;
	/*
	 * Functions to use to manage page allocation,
	 * freeing and writing
	 */
	struct imgmmu_info config;
	/* number of mappings using this catalogue (PCEs) */
	uint32_t nmap;
};

/* Directory entry in the MMU - contains several page mappings */
struct imgmmu_dir {
	/* associated catalogue */
	struct imgmmu_cat *cat;
	/* Physical page used for the directory entries */
	struct imgmmu_page *page;
	/* All the page table structures
	 * in a static array of pointers */
	struct imgmmu_pagetab **page_map;
	/*
	 * Functions to use to manage page allocation,
	 * freeing and writing
	 */
	struct imgmmu_info config;
	/* number of mappings using this directory (PDEs) */
	uint32_t nmap;
};

/* Mapping of a virtual address range onto some entries in a directory */
struct imgmmu_dirmap {
	struct list_head entry; /* Entry in <imgmmu_map:dir_maps> */
	/* associated directory */
	struct imgmmu_dir *dir;
	/*
	 * device virtual address range associated with this mapping - not
	 * owned by the mapping
	 */
	struct imgmmu_halloc virt_mem;
	/* flags used when allocating */
	unsigned int flags;
	/* number of entries mapped (PTEs) */
	uint32_t entries;
};

/* Mapping of a virtual address range onto catalogue entries */
struct imgmmu_map {
	struct list_head dir_maps; /* contains <struct imgmmu_dirmap> */
	/*
	 * device virtual address associated with this mapping - not
	 * owned by the mapping
	 */
	struct imgmmu_halloc virt_mem;
	/* number of entries mapped (PCEs) */
	uint32_t entries;
};

/* One page table of the directory */
struct imgmmu_pagetab {
	/* associated directory */
	struct imgmmu_dir *dir;
	/* page used to store this mapping in the MMU */
	struct imgmmu_page *page;
	/* number of valid entries in this page */
	uint32_t valid_entries;
};
/*
 * local functions
 */
#define MMU_LOG_TMP 256

/*
 * Write to stderr (or KERN_ERR if in kernel module)
 */
void _mmu_log(int err, const char *function, uint32_t line,
	      const char *format, ...)
{
	char _message_[MMU_LOG_TMP];
	va_list args;

	va_start(args, format);
	/* bound the output to the local buffer to avoid overflows */
	vsnprintf(_message_, MMU_LOG_TMP, format, args);
	va_end(args);

	if (err)
		pr_err("ERROR: %s:%u %s", function, line, _message_);
	else
		/* info, debug, ... */
		pr_info("%s:%u %s", function, line, _message_);
}
/*
 * Destruction of a page table
 *
 * warning: Does not verify if pages are still valid or not
 */
static void mmu_pagetab_destroy(struct imgmmu_pagetab *pagetab)
{
	WARN_ON(pagetab->dir == NULL);
	/* the function should be configured */
	WARN_ON(pagetab->dir->config.page_free == NULL);
	/* the physical page should still be here */
	WARN_ON(pagetab->page == NULL);

	mmu_log_dbg("Destroy page table (phys addr 0x%x)\n",
		    pagetab->page->phys_addr);
	pagetab->dir->config.page_free(pagetab->page);
	pagetab->page = NULL;
	kfree(pagetab);
}
/*
 * Extract the catalogue index from a virtual address
 */
static uint16_t mmu_cat_entry(uint64_t vaddr)
{
	return (vaddr & VIRT_CAT_IDX_MASK) >> IMGMMU_CAT_SHIFT;
}

/*
 * Extract the directory index from a virtual address
 */
static uint16_t mmu_dir_entry(uint64_t vaddr)
{
	return (vaddr & VIRT_DIR_IDX_MASK) >> IMGMMU_DIR_SHIFT;
}

/*
 * Extract the page table index from a virtual address
 */
static uint16_t mmu_page_entry(uint64_t vaddr)
{
	return (vaddr & VIRT_PAGE_TBL_MASK()) >> g_mmupageshift;
}
/*
 * Create a page table
 *
 * return: a pointer to the new page table structure and 0 in res,
 * or NULL in case of error with a negative value in res:
 *   -ENOMEM if internal structure allocation failed
 *   -EFAULT if physical page allocation failed
 */
static struct imgmmu_pagetab *mmu_pagetab_create(struct imgmmu_dir *dir,
						 int *res)
{
	struct imgmmu_pagetab *tab = NULL;
	uint32_t i;

	WARN_ON(res == NULL);
	WARN_ON(dir == NULL);
	WARN_ON(dir->config.page_alloc == NULL);
	WARN_ON(dir->config.page_write == NULL);

	tab = kzalloc(sizeof(struct imgmmu_pagetab), GFP_KERNEL);
	if (tab == NULL) {
		mmu_log_err("failed to allocate %zu bytes for page table\n",
			    sizeof(struct imgmmu_pagetab));
		*res = -ENOMEM;
		return NULL;
	}
	tab->dir = dir;

	tab->page = dir->config.page_alloc(dir->config.ctx, IMGMMU_PTYPE_PT);
	if (tab->page == NULL) {
		mmu_log_err("failed to allocate Page Table physical page\n");
		kfree(tab);
		*res = -EFAULT;
		return NULL;
	}
	mmu_log_dbg("Create page table (phys addr 0x%x 0x%x)\n",
		    tab->page->phys_addr, tab->page->cpu_addr);

	/* invalidate all pages */
	for (i = 0; i < IMGMMU_N_PAGE; i++)
		dir->config.page_write(tab->page, i, 0, MMU_FLAG_INVALID, NULL);

	/*
	 * when non-UMA need to update the device
	 * memory after setting it to 0
	 */
	if (dir->config.page_update != NULL)
		dir->config.page_update(tab->page);

	*res = 0;
	return tab;
}
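
/*
 * mmu_pagetab_rollback() is called when mmu_dir_map() fails part-way
 * through: it walks backwards from the failure point (page_offs/dir_offs),
 * marking the PTEs written so far as invalid again. 'entry' is the index of
 * the CPU entry being processed when the failure happened, 'from' the
 * duplicate index reached within it and 'to' the duplicate factor (see
 * mmu_dir_map() below). Page tables created along the way are kept.
 */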
/* Sets mapped pages as invalid with given pagetab entry and range */
static void mmu_pagetab_rollback(struct imgmmu_dir *dir,
				 unsigned int page_offs, unsigned int dir_offs,
				 uint32_t entry, uint32_t from, uint32_t to)
{
	while (entry > 1) {
		if (from == 0) {
			entry--;
			from = to;
		}
		from--;

		if (page_offs == 0) {
			/* -1 is done just after */
			page_offs = IMGMMU_N_PAGE;
			WARN_ON(dir_offs == 0);
			dir_offs--;
		}
		page_offs--;

		/* it should have been used before */
		WARN_ON(dir->page_map[dir_offs] == NULL);
		dir->config.page_write(
			dir->page_map[dir_offs]->page,
			page_offs, 0, MMU_FLAG_INVALID, NULL);
		dir->page_map[dir_offs]->valid_entries--;
	}
}
/*-----------------------------------------------------------------------------
 * End of the IMGMMU_lib_int module
 *---------------------------------------------------------------------------*/

/*
 * public functions already have a group in mmu.h
 */

static size_t g_mmupagesize = IMGMMU_PAGE_SIZE;

size_t imgmmu_get_page_size(void)
{
	return g_mmupagesize;
}

size_t imgmmu_get_entry_shift(unsigned char type)
{
	if (type == IMGMMU_PTYPE_PT)
		return g_mmupageshift;
	else if (type == IMGMMU_PTYPE_PD)
		return IMGMMU_DIR_SHIFT;
	else if (type == IMGMMU_PTYPE_PC)
		return IMGMMU_CAT_SHIFT;
	else
		return 0;
}

int imgmmu_set_page_size(size_t pagesize)
{
	if (pagesize > imgmmu_get_cpu_page_size()) {
		mmu_log_dbg("MMU page size: %zu is bigger than CPU page size (%zu) "
			    "and will only work with physically contiguous memory!\n",
			    pagesize, imgmmu_get_cpu_page_size());
	}
	/* get_order uses CPU page size as a base */
	g_mmupageshift = IMGMMU_PAGE_SHIFT + get_order(pagesize);
	g_mmupagesize = pagesize;
	return 0;
}
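
/*
 * Example (assuming 4 KB CPU pages and IMGMMU_PAGE_SHIFT == 12):
 * imgmmu_set_page_size(16384) gives get_order(16384) == 2, so
 * g_mmupageshift becomes 14 and g_mmupagesize 16 KB.
 */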
size_t imgmmu_get_phys_size(void)
{
	return IMGMMU_PHYS_SIZE;
}

size_t imgmmu_get_virt_size(void)
{
	return IMGMMU_VIRT_SIZE;
}

static size_t g_cpupagesize = PAGE_SIZE;

size_t imgmmu_get_cpu_page_size(void)
{
	return g_cpupagesize;
}

int imgmmu_set_cpu_page_size(size_t pagesize)
{
	if (pagesize != PAGE_SIZE) {
		mmu_log_err("trying to change CPU page size from %zu to %zu\n",
			    PAGE_SIZE, pagesize);
		return -EFAULT;
	}
	return 0;
}
/* Proper directory will be populated on the first mapping request */
static struct imgmmu_dir *mmu_dir_create(const struct imgmmu_info *info,
					 int *res)
{
	struct imgmmu_dir *dir = NULL;
	uint32_t i;

	WARN_ON(res == NULL);

	/* invalid information in the directory config:
	 * - invalid page allocator and dealloc (page write can be NULL)
	 * - invalid virtual address representation
	 * - invalid page size
	 * - invalid MMU size
	 */
	if (info == NULL || info->page_alloc == NULL ||
	    info->page_free == NULL) {
		mmu_log_err("invalid MMU configuration\n");
		*res = -EINVAL;
		return NULL;
	}

	dir = kzalloc(sizeof(struct imgmmu_dir), GFP_KERNEL);
	if (dir == NULL) {
		mmu_log_err("failed to allocate %zu bytes for directory\n",
			    sizeof(struct imgmmu_dir));
		*res = -ENOMEM;
		return NULL;
	}

	dir->page_map = kzalloc(
		IMGMMU_N_TABLE * sizeof(struct imgmmu_pagetab *),
		GFP_KERNEL);
	if (dir->page_map == NULL) {
		kfree(dir);
		mmu_log_err("failed to allocate %zu bytes for directory\n",
			    IMGMMU_N_TABLE * sizeof(struct imgmmu_pagetab *));
		*res = -ENOMEM;
		return NULL;
	}

	memcpy(&dir->config, info, sizeof(struct imgmmu_info));
	if (info->page_write == NULL ||
	    info->page_read == NULL) {
		mmu_log_err("wrong configuration!\n");
		kfree(dir->page_map);
		kfree(dir);
		*res = -EFAULT;
		return NULL;
	}

	dir->page = info->page_alloc(info->ctx, IMGMMU_PTYPE_PD);
	if (dir->page == NULL) {
		mmu_log_err("failed to allocate directory physical page\n");
		kfree(dir->page_map);
		kfree(dir);
		*res = -EFAULT;
		return NULL;
	}
	mmu_log_dbg("create MMU directory (phys page 0x%x 0x%x)\n",
		    dir->page->phys_addr, dir->page->cpu_addr);

	/* now we have a valid imgmmu_dir structure */

	/* invalidate all entries */
	for (i = 0; i < IMGMMU_N_TABLE; i++)
		dir->config.page_write(dir->page, i, 0, MMU_FLAG_INVALID, NULL);

	/* when non-UMA need to update the device memory */
	if (dir->config.page_update != NULL)
		dir->config.page_update(dir->page);

	*res = 0;
	return dir;
}
struct imgmmu_cat *imgmmu_cat_create(const struct imgmmu_info *info,
				     int *res)
{
	struct imgmmu_cat *cat = NULL;
	uint32_t i;

	WARN_ON(res == NULL);

	/* invalid information in the directory config:
	 * - invalid page allocator and dealloc (page write can be NULL)
	 */
	if (info == NULL || info->page_alloc == NULL ||
	    info->page_free == NULL) {
		mmu_log_err("invalid MMU configuration\n");
		*res = -EINVAL;
		return NULL;
	}

	cat = kzalloc(sizeof(struct imgmmu_cat), GFP_KERNEL);
	if (cat == NULL) {
		mmu_log_err("failed to allocate %zu bytes for catalogue\n",
			    sizeof(struct imgmmu_cat));
		*res = -ENOMEM;
		return NULL;
	}

	cat->dir_map = kzalloc(
		IMGMMU_N_DIR * sizeof(struct imgmmu_dir *),
		GFP_KERNEL);
	if (cat->dir_map == NULL) {
		kfree(cat);
		mmu_log_err("failed to allocate %zu bytes for catalogue\n",
			    IMGMMU_N_DIR * sizeof(struct imgmmu_dir *));
		*res = -ENOMEM;
		return NULL;
	}

	memcpy(&cat->config, info, sizeof(struct imgmmu_info));
	if (info->page_write == NULL ||
	    info->page_read == NULL) {
		mmu_log_err("wrong configuration!\n");
		kfree(cat->dir_map);
		kfree(cat);
		*res = -EFAULT;
		return NULL;
	}

	cat->page = info->page_alloc(info->ctx, IMGMMU_PTYPE_PC);
	if (cat->page == NULL) {
		mmu_log_err("failed to allocate catalogue physical page\n");
		kfree(cat->dir_map);
		kfree(cat);
		*res = -EFAULT;
		return NULL;
	}
	mmu_log_dbg("create MMU catalogue (phys page 0x%x 0x%x)\n",
		    cat->page->phys_addr, cat->page->cpu_addr);

	/* now we have a valid imgmmu_cat structure */

	/* invalidate all entries */
	for (i = 0; i < IMGMMU_N_DIR; i++)
		cat->config.page_write(cat->page, i, 0, MMU_FLAG_INVALID, NULL);

	/* when non-UMA need to update the device memory */
	if (cat->config.page_update != NULL)
		cat->config.page_update(cat->page);

	*res = 0;
	return cat;
}
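
/*
 * Typical setup (sketch; the my_* callbacks are hypothetical and are
 * whatever the caller registers in struct imgmmu_info):
 *
 *	struct imgmmu_info info = {
 *		.ctx = my_ctx,
 *		.page_alloc = my_page_alloc,
 *		.page_free = my_page_free,
 *		.page_write = my_page_write,
 *		.page_read = my_page_read,
 *		.page_update = my_page_update,	(may be NULL for UMA)
 *	};
 *	int res;
 *	struct imgmmu_cat *cat = imgmmu_cat_create(&info, &res);
 */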
static int mmu_dir_destroy(struct imgmmu_dir *dir)
{
	uint32_t i;

	if (dir == NULL) {
		/* could be an assert */
		mmu_log_err("dir is NULL\n");
		return -EINVAL;
	}

	if (dir->nmap > 0)
		/* mappings should have been destroyed! */
		mmu_log_err("directory still has %u mappings attached to it\n",
			    dir->nmap);

	WARN_ON(dir->config.page_free == NULL);
	WARN_ON(dir->page_map == NULL);

	mmu_log_dbg("destroy MMU dir (phys page 0x%x)\n",
		    dir->page->phys_addr);

	/* first we destroy the directory entry */
	dir->config.page_free(dir->page);
	dir->page = NULL;

	/* destroy every mapping that still exists */
	for (i = 0; i < IMGMMU_N_TABLE; i++)
		if (dir->page_map[i] != NULL) {
			mmu_pagetab_destroy(dir->page_map[i]);
			dir->page_map[i] = NULL;
		}

	kfree(dir->page_map);
	kfree(dir);
	return 0;
}

int imgmmu_cat_destroy(struct imgmmu_cat *cat)
{
	uint32_t i;

	if (cat == NULL) {
		/* could be an assert */
		mmu_log_err("cat is NULL\n");
		return -EINVAL;
	}

	if (cat->nmap > 0)
		/* mappings should have been destroyed! */
		mmu_log_err("catalogue still has %u mappings attached to it\n",
			    cat->nmap);

	WARN_ON(cat->config.page_free == NULL);
	WARN_ON(cat->dir_map == NULL);

	mmu_log_dbg("destroy MMU cat (phys page 0x%x)\n",
		    cat->page->phys_addr);

	/* first we destroy the catalogue entry */
	cat->config.page_free(cat->page);
	cat->page = NULL;

	/* destroy every mapping that still exists */
	for (i = 0; i < IMGMMU_N_DIR; i++)
		if (cat->dir_map[i] != NULL) {
			mmu_dir_destroy(cat->dir_map[i]);
			cat->dir_map[i] = NULL;
		}

	kfree(cat->dir_map);
	kfree(cat);
	return 0;
}

struct imgmmu_page *imgmmu_cat_get_page(struct imgmmu_cat *cat)
{
	WARN_ON(cat == NULL);
	return cat->page;
}
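
/*
 * imgmmu_cat_get_pte() performs a software page table walk for vaddr: it
 * reads back the PCE and PDE through the page_read callback, checks that
 * they match the physical pages tracked in the host structures, and
 * returns the PTE as physical address | flags, or (uint64_t)-1 on any
 * inconsistency or unmapped level.
 */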
uint64_t imgmmu_cat_get_pte(struct imgmmu_cat *cat,
			    uint64_t vaddr)
{
	uint16_t cat_entry = 0;
	uint16_t dir_entry = 0;
	uint16_t tab_entry = 0;
	struct imgmmu_dir *dir;
	struct imgmmu_pagetab *tab;
	uint64_t addr;
	unsigned flags;

	if (vaddr & (imgmmu_get_page_size() - 1))
		return (uint64_t)-1;

	WARN_ONCE(cat == NULL, "No MMU entries");
	if (cat == NULL || cat->config.page_read == NULL)
		return (uint64_t)-1;

	cat_entry = mmu_cat_entry(vaddr);
	dir_entry = mmu_dir_entry(vaddr);
	tab_entry = mmu_page_entry(vaddr);

	dir = cat->dir_map[cat_entry];
	if (dir == NULL || dir->page_map[dir_entry] == NULL)
		return (uint64_t)-1;

	addr = cat->config.page_read(
		cat->page, cat_entry, NULL, &flags);
	/* Check consistency of PCE */
	if (addr != dir->page->phys_addr) {
		mmu_log_err("PCE entry inconsistent!\n");
		return (uint64_t)-1;
	}

	tab = dir->page_map[dir_entry];
	if (tab == NULL || dir->page == NULL)
		return (uint64_t)-1;

	addr = dir->config.page_read(
		dir->page, dir_entry, NULL, &flags);
	/* Check consistency of PDE */
	if (addr != tab->page->phys_addr) {
		mmu_log_err("PDE entry inconsistent!\n");
		return (uint64_t)-1;
	}

	addr = dir->config.page_read(
		tab->page, tab_entry, NULL, &flags);
	return addr | flags;
}
uint64_t imgmmu_cat_override_phys_addr(struct imgmmu_cat *cat,
				       uint64_t vaddr, uint64_t new_phys_addr)
{
	uint32_t cat_entry = 0;
	uint32_t dir_entry = 0;
	uint32_t tab_entry = 0;
	struct imgmmu_dir *dir;
	unsigned flags = 0;

	WARN_ON(cat == NULL);
	if (cat->config.page_read == NULL)
		return (uint64_t)-1;
	if (cat->config.page_write == NULL)
		return (uint64_t)-1;

	cat_entry = mmu_cat_entry(vaddr);
	dir_entry = mmu_dir_entry(vaddr);
	tab_entry = mmu_page_entry(vaddr);

	dir = cat->dir_map[cat_entry];
	WARN_ON(dir == NULL);
	if (dir->page_map[dir_entry] == NULL)
		return (uint64_t)-1;

	(void)dir->config.page_read(
		dir->page_map[dir_entry]->page, tab_entry, NULL, &flags);
	if (!(flags & MMU_FLAG_VALID))
		return (uint64_t)-1;

	dir->config.page_write(
		dir->page_map[dir_entry]->page,
		tab_entry,
		new_phys_addr | (uint64_t)pte_cache_mode << MMU_PTE_AXCACHE_SHIFT,
		flags | IMGMMU_BYPASS_ADDR_TRANS, NULL);
	return 0;
}
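
/*
 * Map a device virtual range inside a single directory.
 *
 * Physical pages are supplied through the phys_iter_next callback (array-
 * or scatterlist-backed, see below). When the CPU page size is larger than
 * the MMU page size each CPU page is split into "duplicate" consecutive
 * PTEs; e.g. (hypothetically) with 16 KB CPU pages and 4 KB MMU pages,
 * duplicate == 4 and every physical address returned by the iterator
 * produces 4 PTEs at phys, phys + 4K, phys + 8K and phys + 12K.
 */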
static struct imgmmu_dirmap *mmu_dir_map(struct imgmmu_dir *dir,
		struct imgmmu_halloc *virt_mem,
		unsigned int map_flag,
		int (*phys_iter_next)(void *arg, uint64_t *next),
		void *phys_iter_arg,
		void *priv,
		int *res)
{
	unsigned int first_dir = 0, first_page = 0;
	unsigned int dir_offs = 0, page_offs = 0;
	uint32_t entries = 0;
	uint32_t i, d;
	const uint32_t duplicate =
		imgmmu_get_cpu_page_size() < imgmmu_get_page_size() ?
		1 : imgmmu_get_cpu_page_size() / imgmmu_get_page_size();
	int ret = 0;
	struct imgmmu_dirmap *map = NULL;
	/* in non UMA, updates on pages need to be done:
	 * store the indices of the directory entry pages to update */
	uint32_t *pages_to_update;
	/* number of pages in pages_to_update
	 * (will be at least 1 for the first_page to update) */
	uint32_t num_pages_to_update = 0;
	/* to know if we also need to update the directory page
	 * (creation of new page) */
	bool dir_update = false;

	WARN_ON(res == NULL);
	WARN_ON(dir == NULL);
	WARN_ON(virt_mem == NULL);
	/* otherwise PAGE_SIZE and MMU page size are not set properly! */
	WARN_ON(duplicate == 0);

	entries = virt_mem->size / IMGMMU_GET_MAX_PAGE_SIZE();
	if (virt_mem->size % imgmmu_get_page_size() != 0 || entries == 0) {
		mmu_log_err("invalid allocation size\n");
		*res = -EINVAL;
		return NULL;
	}

	if ((map_flag & MMU_FLAG_VALID) != 0) {
		mmu_log_err("valid flag (0x%x) is set in the flags 0x%x\n",
			    MMU_FLAG_VALID, map_flag);
		*res = -EINVAL;
		return NULL;
	}

	/* has to be dynamically allocated because it is bigger than 1k
	 * (max stack in the kernel)
	 * IMGMMU_N_TABLE is 1024 for 4096B pages,
	 * that's a 4k allocation (1 page) */
	pages_to_update = kzalloc(IMGMMU_N_TABLE * sizeof(uint32_t), GFP_KERNEL);
	if (pages_to_update == NULL) {
		mmu_log_err("Failed to allocate the update index table (%zu Bytes)\n",
			    IMGMMU_N_TABLE * sizeof(uint32_t));
		*res = -ENOMEM;
		return NULL;
	}

	/* manage multiple page table mapping */
	first_dir = mmu_dir_entry(virt_mem->vaddr);
	first_page = mmu_page_entry(virt_mem->vaddr);
	WARN_ON(first_dir > IMGMMU_N_TABLE);
	WARN_ON(first_page > IMGMMU_N_PAGE);

	/* verify that the pages that should be used are available */
	dir_offs = first_dir;
	page_offs = first_page;

	/*
	 * loop over the number of entries given by the CPU allocator,
	 * but the CPU page size can be > than the MMU page size, therefore
	 * it may need to "duplicate" entries by creating a fake
	 * physical address
	 */
	for (i = 0; i < entries * duplicate; i++) {
		if (page_offs >= IMGMMU_N_PAGE) {
			WARN_ON(dir_offs > IMGMMU_N_TABLE);
			dir_offs++;	/* move to next directory */
			WARN_ON(dir_offs > IMGMMU_N_TABLE);
			page_offs = 0;	/* using its first page */
		}

		/* if dir->page_map[dir_offs] == NULL it is not yet allocated,
		 * which means all its entries are available */
		if (pte_rb_check &&
		    dir->page_map[dir_offs] != NULL) {
			/*
			 * inside a page table:
			 * verify that the required offset is invalid
			 */
			unsigned flags = 0;

			(void)dir->config.page_read(
				dir->page_map[dir_offs]->page, page_offs, priv, &flags);
			if (flags & MMU_FLAG_VALID) {
				mmu_log_err("PTE is currently in use\n");
				ret = -EBUSY;
				break;
			}
		}
		/* PageTable struct exists */
		page_offs++;
	} /* for all needed entries */

	/* it means one entry was not invalid or not enough pages were given */
	if (ret != 0) {
		/* message already printed */
		*res = ret;
		kfree(pages_to_update);
		return NULL;
	}

	map = kzalloc(sizeof(struct imgmmu_dirmap), GFP_KERNEL);
	if (map == NULL) {
		mmu_log_err("failed to allocate %zu bytes for mapping structure\n",
			    sizeof(struct imgmmu_dirmap));
		*res = -ENOMEM;
		kfree(pages_to_update);
		return NULL;
	}
	map->dir = dir;
	map->virt_mem = *virt_mem;
	map->flags = map_flag;

	/* we now know that all pages are available */
	dir_offs = first_dir;
	page_offs = first_page;
	pages_to_update[num_pages_to_update] = first_dir;
	num_pages_to_update++;

	for (i = 0; i < entries; i++) {
		uint64_t curr_phy_addr;

		if (phys_iter_next(phys_iter_arg, &curr_phy_addr) != 0) {
			mmu_log_err("not enough entries in physical address array/sg list!\n");
			kfree(map);
			kfree(pages_to_update);
			*res = -EFAULT;
			return NULL;
		}
		if ((curr_phy_addr & (imgmmu_get_page_size() - 1)) != 0) {
			mmu_log_err("current physical address: %llx "
				    "is not aligned to MMU page size: %zu!\n",
				    curr_phy_addr, imgmmu_get_page_size());
			kfree(map);
			kfree(pages_to_update);
			*res = -EFAULT;
			return NULL;
		}

		for (d = 0; d < duplicate; d++) {
			if (page_offs >= IMGMMU_N_PAGE) {
				dir_offs++;	/* move to next directory */
				page_offs = 0;	/* using its first page */
				pages_to_update[num_pages_to_update] = dir_offs;
				num_pages_to_update++;
			}

			/* this page table object does not exist, create it */
			if (dir->page_map[dir_offs] == NULL) {
				struct imgmmu_pagetab *pagetab;

				pagetab = mmu_pagetab_create(dir, res);
				dir->page_map[dir_offs] = pagetab;
				if (dir->page_map[dir_offs] == NULL) {
					mmu_log_err("failed to create a page table\n");
					/* invalidate all already mapped pages,
					 * do not destroy the created pages */
					mmu_pagetab_rollback(dir,
							     page_offs,
							     dir_offs,
							     i,
							     d,
							     duplicate);
					kfree(map);
					kfree(pages_to_update);
					*res = -EFAULT;
					return NULL;
				}
				pagetab->page->virt_base = (dir->page->virt_base &
					~(VIRT_PAGE_TBL_MASK())) +
					((1 << IMGMMU_DIR_SHIFT) * dir_offs);
				/*
				 * make this page table valid
				 * should be dir_offs
				 */
				dir->config.page_write(
					dir->page,
					dir_offs,
					pagetab->page->phys_addr,
					MMU_FLAG_VALID, NULL);
				dir_update = true;
			}

			if (pte_rb_check) {
				unsigned flags = 0;

				(void)dir->config.page_read(
					dir->page_map[dir_offs]->page, page_offs, priv, &flags);
				if (flags & MMU_FLAG_VALID) {
					mmu_log_err("PTE is currently in use (2)\n");
					kfree(map);
					kfree(pages_to_update);
					*res = -EFAULT;
					return NULL;
				}
			}
			/*
			 * map this particular page in the page table
			 * use d*(MMU page size) to add additional entries
			 * from the given physical address with the correct
			 * offset for the MMU
			 */
			dir->config.page_write(
				dir->page_map[dir_offs]->page,
				page_offs,
				(curr_phy_addr + d * imgmmu_get_page_size()) |
				(uint64_t)pte_cache_mode << MMU_PTE_AXCACHE_SHIFT,
				map->flags | MMU_FLAG_VALID, priv);
			dir->page_map[dir_offs]->valid_entries++;

			if (pte_rb_check) {
				unsigned flags = 0;
				uint64_t phys = dir->config.page_read(
					dir->page_map[dir_offs]->page, page_offs, priv, &flags);

				if (flags != (map->flags | MMU_FLAG_VALID) ||
				    (phys != (curr_phy_addr + d * imgmmu_get_page_size()))) {
					mmu_log_err("PTE read back failed\n");
					kfree(map);
					kfree(pages_to_update);
					*res = -EFAULT;
					return NULL;
				}
			}

			page_offs++;
		} /* for duplicate */
	} /* for entries */

	map->entries = entries * duplicate;
	/* one more mapping is related to this directory */
	dir->nmap++;

	/* if non UMA we need to update device memory */
	if (dir->config.page_update != NULL) {
		while (num_pages_to_update > 0) {
			uint32_t idx = pages_to_update[num_pages_to_update - 1];

			dir->config.page_update(
				dir->page_map[idx]->page);
			num_pages_to_update--;
		}
		if (dir_update == true)
			dir->config.page_update(
				dir->page);
	}

	*res = 0;
	kfree(pages_to_update);
	return map;
}
/*
 * with physical address array
 */
struct linear_phys_iter {
	uint64_t *array;
	int idx;
};
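
/*
 * Returns the next physical address from a plain array of CPU-page-sized
 * entries. When the MMU page size is larger than the CPU page size the
 * iterator skips 'advance' array entries per call, assuming the caller
 * provided physically contiguous CPU pages (see imgmmu_set_page_size()).
 */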
static int linear_phys_iter_next(void *arg, uint64_t *next)
{
	struct linear_phys_iter *iter = arg;
	int advance = imgmmu_get_cpu_page_size() < imgmmu_get_page_size() ?
		imgmmu_get_page_size() / imgmmu_get_cpu_page_size() : 1;

	*next = iter->array[iter->idx]; /* boundary check? */
	iter->idx += advance;
	return 0;
}
struct imgmmu_map *imgmmu_cat_map_arr(struct imgmmu_cat *cat,
				      uint64_t *phys_page_list,
				      const struct imgmmu_halloc *virt_mem,
				      unsigned int map_flag,
				      void *priv,
				      int *res)
{
	uint16_t idx;
	struct linear_phys_iter arg = { phys_page_list, 0 };
	struct imgmmu_map *map = NULL;
	struct imgmmu_dirmap *dir_map = NULL;
	struct imgmmu_halloc virt_mem_range;

	if (virt_mem->vaddr >> IMGMMU_VIRT_SIZE) {
		mmu_log_err("Virtual address beyond %u bits!\n",
			    IMGMMU_VIRT_SIZE);
		*res = -EFAULT;
		return NULL;
	}
	if (virt_mem->vaddr & (imgmmu_get_page_size() - 1)) {
		mmu_log_err("Virtual address not aligned to %zu!\n",
			    imgmmu_get_page_size());
		*res = -EFAULT;
		return NULL;
	}

	map = kzalloc(sizeof(struct imgmmu_map), GFP_KERNEL);
	if (map == NULL) {
		mmu_log_err("failed to allocate %zu bytes for mapping structure\n",
			    sizeof(struct imgmmu_map));
		*res = -ENOMEM;
		return NULL;
	}
	INIT_LIST_HEAD(&map->dir_maps);
	/* Store the whole virtual address space for this mapping */
	map->virt_mem = *virt_mem;

	/* Set starting address & total size */
	virt_mem_range.vaddr = virt_mem->vaddr;
	virt_mem_range.size = virt_mem->size;

	do {
		struct imgmmu_dir *dir;

		/* Determine catalogue entry (PCE -> PD) */
		idx = mmu_cat_entry(virt_mem_range.vaddr);
		dir = cat->dir_map[idx];
		if (dir == NULL) {
			dir = mmu_dir_create(
				&cat->config, res);
			if (*res != 0)
				goto error;

			dir->page->virt_base = virt_mem_range.vaddr &
				~(VIRT_DIR_IDX_MASK | VIRT_PAGE_TBL_MASK());
			dir->cat = cat;
			WARN_ON(cat->dir_map[idx] != NULL);
			cat->dir_map[idx] = dir;
			/* Mark PCE valid and store PD address */
			cat->config.page_write(
				cat->page,
				idx, dir->page->phys_addr,
				MMU_FLAG_VALID, NULL);
			if (cat->config.page_update != NULL)
				cat->config.page_update(cat->page);
			cat->nmap++;
		}

		/* Need to handle a buffer spanning across GB boundaries */
		if (((virt_mem_range.vaddr % (1ULL << IMGMMU_CAT_SHIFT)) +
		     virt_mem_range.size) >= (1ULL << IMGMMU_CAT_SHIFT))
			virt_mem_range.size = (1ULL << IMGMMU_CAT_SHIFT) -
				(virt_mem_range.vaddr % (1ULL << IMGMMU_CAT_SHIFT));

		dir_map = mmu_dir_map(dir, &virt_mem_range, map_flag,
				      linear_phys_iter_next, &arg, priv, res);
		if (dir_map) {
			/* Update starting address */
			virt_mem_range.vaddr += virt_mem_range.size;
			/* and bytes left ... */
			virt_mem_range.size = (virt_mem->vaddr + virt_mem->size) -
				virt_mem_range.vaddr;
			list_add(&dir_map->entry, &map->dir_maps);
		}
	} while (dir_map && *res == 0 && virt_mem_range.size);

	if (dir_map)
		/* If the last dir mapping succeeded,
		 * return the overlay container mapping structure */
		return map;

error:
	imgmmu_cat_unmap(map);
	return NULL;
}
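
/*
 * Usage sketch (addresses are hypothetical): mapping three 4 KB pages at
 * device virtual address 0x40000000 with no extra PTE flags:
 *
 *	uint64_t pages[3] = { phys0, phys1, phys2 };	(4 KB aligned)
 *	struct imgmmu_halloc va = { .vaddr = 0x40000000, .size = 3 * 4096 };
 *	int res;
 *	struct imgmmu_map *m =
 *		imgmmu_cat_map_arr(cat, pages, &va, 0, NULL, &res);
 *	...
 *	imgmmu_cat_unmap(m);
 */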
/*
 * with sg
 */
struct sg_phys_iter {
	struct scatterlist *sgl;
	unsigned int offset;
	bool use_sg_dma;
};
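
/*
 * Returns the next physical address from a scatterlist, advancing in
 * IMGMMU_GET_MAX_PAGE_SIZE() steps within each segment. When use_sg_dma
 * is set, the DMA addresses and lengths are used instead of the CPU
 * physical ones (e.g. when the buffer sits behind an IOMMU).
 */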
static int sg_phys_iter_next(void *arg, uint64_t *next)
{
	struct sg_phys_iter *iter = arg;
	phys_addr_t phys;
	unsigned int len;

	if (!iter->sgl)
		return -EFAULT;

	if (iter->use_sg_dma) {
		if (sg_dma_address(iter->sgl) == ~(dma_addr_t)0 ||
		    !sg_dma_len(iter->sgl))
			return -EFAULT;

		phys = sg_dma_address(iter->sgl);
		len = sg_dma_len(iter->sgl);
	} else {
		phys = sg_phys(iter->sgl);
		len = iter->sgl->length;
	}

	*next = phys + iter->offset;
	iter->offset += IMGMMU_GET_MAX_PAGE_SIZE();
	if (iter->offset >= len) {
		int advance = iter->offset / len;

		while (iter->sgl) {
			iter->sgl = sg_next(iter->sgl);
			advance--;
			if (!advance)
				break;
		}
		iter->offset = 0;
	}
	return 0;
}
struct imgmmu_map *imgmmu_cat_map_sg(
	struct imgmmu_cat *cat,
	struct scatterlist *phys_page_sg,
	bool use_sg_dma,
	const struct imgmmu_halloc *virt_mem,
	unsigned int map_flag,
	void *priv,
	int *res)
{
	uint16_t idx;
	struct sg_phys_iter arg = { phys_page_sg, 0, use_sg_dma };
	struct imgmmu_map *map = NULL;
	struct imgmmu_dirmap *dir_map = NULL;
	struct imgmmu_halloc virt_mem_range;

	if (virt_mem->vaddr >> IMGMMU_VIRT_SIZE) {
		mmu_log_err("Virtual address beyond %u bits!\n",
			    IMGMMU_VIRT_SIZE);
		*res = -EFAULT;
		return NULL;
	}
	if (virt_mem->vaddr & (imgmmu_get_page_size() - 1)) {
		mmu_log_err("Virtual address not aligned to %zu!\n",
			    imgmmu_get_page_size());
		*res = -EFAULT;
		return NULL;
	}

	map = kzalloc(sizeof(struct imgmmu_map), GFP_KERNEL);
	if (map == NULL) {
		mmu_log_err("failed to allocate %zu bytes for mapping structure\n",
			    sizeof(struct imgmmu_map));
		*res = -ENOMEM;
		return NULL;
	}
	INIT_LIST_HEAD(&map->dir_maps);
	/* Store the whole virtual address space for this mapping */
	map->virt_mem = *virt_mem;

	/* Set starting address & total size */
	virt_mem_range.vaddr = virt_mem->vaddr;
	virt_mem_range.size = virt_mem->size;

	do {
		struct imgmmu_dir *dir;

		/* Determine catalogue entry (PCE -> PD) */
		idx = mmu_cat_entry(virt_mem_range.vaddr);
		dir = cat->dir_map[idx];
		if (dir == NULL) {
			dir = mmu_dir_create(
				&cat->config, res);
			if (*res != 0)
				goto error;

			dir->page->virt_base = virt_mem_range.vaddr &
				~(VIRT_DIR_IDX_MASK | VIRT_PAGE_TBL_MASK());
			dir->cat = cat;
			WARN_ON(cat->dir_map[idx] != NULL);
			cat->dir_map[idx] = dir;
			/* Mark PCE valid and store PD address */
			cat->config.page_write(
				cat->page,
				idx, dir->page->phys_addr,
				MMU_FLAG_VALID, NULL);
			if (cat->config.page_update != NULL)
				cat->config.page_update(cat->page);
			cat->nmap++;
		}

		/* Need to handle a buffer spanning across GB boundaries */
		if (((virt_mem_range.vaddr % (1ULL << IMGMMU_CAT_SHIFT)) +
		     virt_mem_range.size) >= (1ULL << IMGMMU_CAT_SHIFT))
			virt_mem_range.size = (1ULL << IMGMMU_CAT_SHIFT) -
				(virt_mem_range.vaddr % (1ULL << IMGMMU_CAT_SHIFT));

		dir_map = mmu_dir_map(dir, &virt_mem_range, map_flag,
				      sg_phys_iter_next, &arg, priv, res);
		if (dir_map) {
			/* Update starting address */
			virt_mem_range.vaddr += virt_mem_range.size;
			/* and bytes left ... */
			virt_mem_range.size = (virt_mem->vaddr + virt_mem->size) -
				virt_mem_range.vaddr;
			list_add(&dir_map->entry, &map->dir_maps);
		}
	} while (dir_map && *res == 0 && virt_mem_range.size);

	if (dir_map)
		/* If the last dir mapping succeeded,
		 * return the overlay container mapping structure */
		return map;

error:
	imgmmu_cat_unmap(map);
	return NULL;
}
static int mmu_dir_unmap(struct imgmmu_dirmap *map)
{
	unsigned int first_dir = 0, first_page = 0;
	unsigned int dir_offs = 0, page_offs = 0;
	uint32_t i;
	struct imgmmu_dir *dir = NULL;
	/* in non UMA, updates on pages need to be done:
	 * store the indices of the directory entry pages to update */
	uint32_t *pages_to_update;
	uint32_t num_pages_to_update = 0;

	WARN_ON(map == NULL);
	WARN_ON(map->entries == 0);
	WARN_ON(map->dir == NULL);
	dir = map->dir;

	/* has to be dynamically allocated because
	 * it is bigger than 1k (max stack in the kernel) */
	pages_to_update = kzalloc(IMGMMU_N_TABLE * sizeof(uint32_t), GFP_KERNEL);
	if (pages_to_update == NULL) {
		mmu_log_err("Failed to allocate the update index table (%zu Bytes)\n",
			    IMGMMU_N_TABLE * sizeof(uint32_t));
		kfree(map);
		return -ENOMEM;
	}

	first_dir = mmu_dir_entry(map->virt_mem.vaddr);
	first_page = mmu_page_entry(map->virt_mem.vaddr);

	/* verify that the pages that should be used are available */
	dir_offs = first_dir;
	page_offs = first_page;
	pages_to_update[num_pages_to_update] = first_dir;
	num_pages_to_update++;

	for (i = 0; i < map->entries; i++) {
		if (page_offs >= IMGMMU_N_PAGE) {
			dir_offs++;	/* move to next directory */
			page_offs = 0;	/* using its first page */
			pages_to_update[num_pages_to_update] = dir_offs;
			num_pages_to_update++;
		}

		/* if this page table object does not exist, something destroyed
		 * it while the mapping was supposed to use it */
		WARN_ON(dir->page_map[dir_offs] == NULL);

		dir->config.page_write(
			dir->page_map[dir_offs]->page,
			page_offs, 0,
			MMU_FLAG_INVALID, NULL);
		dir->page_map[dir_offs]->valid_entries--;

		page_offs++;
	}

	dir->nmap--;

	if (dir->config.page_update != NULL)
		while (num_pages_to_update > 0) {
			uint32_t idx = pages_to_update[num_pages_to_update - 1];

			dir->config.page_update(
				dir->page_map[idx]->page);
			num_pages_to_update--;
		}

	/* mapping does not own the given virtual address */
	kfree(map);
	kfree(pages_to_update);
	return 0;
}
int imgmmu_cat_unmap(struct imgmmu_map *map)
{
	WARN_ON(map == NULL);

	while (!list_empty(&map->dir_maps)) {
		struct imgmmu_dirmap *dir_map;
		struct imgmmu_cat *cat;
		struct imgmmu_dir *dir;
		uint16_t idx;

		dir_map = list_first_entry(&map->dir_maps,
					   struct imgmmu_dirmap, entry);
		list_del(&dir_map->entry);
		idx = mmu_cat_entry(dir_map->virt_mem.vaddr);
		dir = dir_map->dir;
		cat = dir->cat;
		WARN_ON(cat == NULL);

		/* This destroys the mapping */
		mmu_dir_unmap(dir_map);

		/* Check integrity */
		WARN_ON(dir != cat->dir_map[idx]);

		if (!dir->nmap) {
			mmu_dir_destroy(dir);
			WARN_ON(cat->dir_map[idx] == NULL);
			cat->dir_map[idx] = NULL;
			/* Mark PCE invalid */
			cat->config.page_write(
				cat->page,
				idx, 0,
				MMU_FLAG_INVALID, NULL);
			if (cat->config.page_update != NULL)
				cat->config.page_update(cat->page);
			cat->nmap--;
		}
	}

	kfree(map);
	return 0;
}
static uint32_t mmu_dir_clean(struct imgmmu_dir *dir)
{
	uint32_t i, removed = 0;

	WARN_ON(dir == NULL);
	WARN_ON(dir->config.page_write == NULL);

	for (i = 0; i < IMGMMU_N_TABLE; i++) {
		if (dir->page_map[i] != NULL &&
		    dir->page_map[i]->valid_entries == 0) {
			dir->config.page_write(
				dir->page,
				i, 0,
				MMU_FLAG_INVALID, NULL);
			mmu_pagetab_destroy(dir->page_map[i]);
			dir->page_map[i] = NULL;
			removed++;
		}
	}

	if (dir->config.page_update != NULL)
		dir->config.page_update(dir->page);
	return removed;
}

/* Not used */
uint32_t imgmmu_cat_clean(struct imgmmu_cat *cat)
{
	uint32_t i, removed = 0;

	WARN_ON(cat == NULL);
	WARN_ON(cat->config.page_write == NULL);

	for (i = 0; i < IMGMMU_N_DIR; i++) {
		if (cat->dir_map[i] != NULL) {
			mmu_dir_clean(cat->dir_map[i]);
			cat->dir_map[i] = NULL;
			removed++;
		}
	}

	if (cat->config.page_update != NULL)
		cat->config.page_update(cat->page);
	return removed;
}
uint64_t imgmmu_get_pte_cache_bits(uint64_t pte_entry)
{
	return pte_entry & MMU_PTE_AXCACHE_MASK;
}

u8 imgmmu_get_pte_parity_shift(void)
{
	return MMU_PTE_PARITY_SHIFT;
}
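
/*
 * Force the parity bit in a PTE to 1: the bit is cleared first and then
 * set, so the call is idempotent whatever the previous state of the bit.
 */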
void imgmmu_set_pte_parity(uint64_t *pte_entry)
{
	*pte_entry &= ~(1ULL << imgmmu_get_pte_parity_shift());
	*pte_entry |= (1ULL << imgmmu_get_pte_parity_shift());
}