// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 VeriSilicon Holdings Co., Ltd.
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/delay.h>

#include "vs_dc_mmu.h"

static bool mmu_construct = false;
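
/*
 * _allocate_memory() - allocate a zeroed buffer for MMU bookkeeping.
 * Uses kmalloc() for requests up to PAGE_SIZE and vmalloc() for larger
 * requests; the caller is responsible for freeing it accordingly.
 */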
int _allocate_memory(u32 bytes, void **memory)
{
        void *mem = NULL;

        if (bytes == 0 || memory == NULL) {
                pr_err("%s has invalid arguments.\n", __func__);
                return -EINVAL;
        }

        if (bytes > PAGE_SIZE)
                mem = vmalloc(bytes);
        else
                mem = kmalloc(bytes, GFP_KERNEL);

        if (!mem) {
                pr_err("%s out of memory.\n", __func__);
                return -ENOMEM;
        }

        memset(mem, 0, bytes);
        *memory = mem;

        return 0;
}

static int _create_mutex(void **mutex)
{
        int ret = 0;

        if (mutex == NULL)
                return -EINVAL;

        ret = _allocate_memory(sizeof(struct mutex), mutex);
        if (ret)
                return ret;

        mutex_init(*(struct mutex **)mutex);

        return 0;
}
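
/*
 * _acquire_mutex() - lock @mutex. Waits forever when @timeout is
 * DC_INFINITE, otherwise polls mutex_trylock() roughly once per
 * millisecond for up to @timeout milliseconds.
 */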
static int _acquire_mutex(void *mutex, u32 timeout)
{
        if (mutex == NULL) {
                pr_err("%s has invalid argument.\n", __func__);
                return -EINVAL;
        }

        if (timeout == DC_INFINITE) {
                mutex_lock(mutex);
                return 0;
        }

        for (;;) {
                /* Try to acquire the mutex. */
                if (mutex_trylock(mutex)) {
                        /* Success. */
                        return 0;
                }

                if (timeout-- == 0)
                        break;

                /* Wait for 1 millisecond. */
                udelay(1000);
        }

        return -ETIMEDOUT;
}

static int _release_mutex(void *mutex)
{
        if (mutex == NULL) {
                pr_err("%s has invalid argument.\n", __func__);
                return -EINVAL;
        }

        mutex_unlock(mutex);

        return 0;
}
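
/*
 * Address decomposition helpers: a virtual address is split into a
 * Master TLB (MTLB) index and a Slave TLB (STLB) index for 4K pages;
 * _address_to_index() flattens the pair into a page-table map index.
 */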
static u32 _mtlb_offset(u32 address)
{
        return (address & MMU_MTLB_MASK) >> MMU_MTLB_SHIFT;
}

static u32 _stlb_offset(u32 address)
{
        return (address & MMU_STLB_4K_MASK) >> MMU_STLB_4K_SHIFT;
}

static u32 _address_to_index(dc_mmu_pt mmu, u32 address)
{
        return _mtlb_offset(address) * MMU_STLB_4K_ENTRY_NUM + _stlb_offset(address);
}
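
/*
 * _set_page() - build an STLB entry: bits [31:12] hold the 4K-aligned
 * page address, bits [11:4] the upper address extension, bit 2 the
 * writable flag, bit 1 "ignore exception" (cleared) and bit 0 present.
 */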
static u32 _set_page(u32 page_address, u32 page_address_ext, bool writable)
{
        u32 entry = page_address
                    /* AddressExt */
                    | (page_address_ext << 4)
                    /* Ignore exception */
                    | (0 << 1)
                    /* Present */
                    | (1 << 0);

        if (writable) {
                /* Writable */
                entry |= (1 << 2);
        }

        return entry;
}

static void _write_page_entry(u32 *page_entry, u32 entry_value)
{
        *page_entry = entry_value;
}

static u32 _read_page_entry(u32 *page_entry)
{
        return *page_entry;
}

int _allocate_stlb(dc_mmu_stlb_pt *stlb)
{
        dc_mmu_stlb_pt stlb_t = NULL;
        void *mem = NULL;

        mem = kzalloc(sizeof(dc_mmu_stlb), GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        stlb_t = (dc_mmu_stlb_pt)mem;
        stlb_t->size = MMU_STLB_4K_SIZE;

        *stlb = stlb_t;

        return 0;
}
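
/*
 * _allocate_all_stlb() - allocate one contiguous, write-combined DMA
 * buffer large enough to back the STLBs of every MTLB entry. The
 * buffer is zeroed and its CPU and bus addresses are stored in @stlb.
 */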
int _allocate_all_stlb(struct device *dev, dc_mmu_stlb_pt *stlb)
{
        dc_mmu_stlb_pt stlb_t = NULL;
        void *mem = NULL;
        void *cookie = NULL;
        dma_addr_t dma_addr;
        size_t size;

        mem = kzalloc(sizeof(dc_mmu_stlb), GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        stlb_t = (dc_mmu_stlb_pt)mem;
        stlb_t->size = MMU_STLB_4K_SIZE * MMU_MTLB_ENTRY_NUM;
        size = PAGE_ALIGN(stlb_t->size);

        cookie = dma_alloc_wc(dev, size, &dma_addr, GFP_KERNEL);
        if (!cookie) {
                dev_err(dev, "Failed to alloc stlb buffer.\n");
                kfree(mem);
                return -ENOMEM;
        }

        stlb_t->logical = cookie;
        stlb_t->physBase = (u64)dma_addr;
        memset(stlb_t->logical, 0, size);

        *stlb = stlb_t;

        return 0;
}
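
/*
 * _setup_process_address_space() - build the initial page-table map
 * (one large FREE node covering every entry), allocate the STLB
 * backing store, and point every dynamic MTLB entry at its STLB with
 * the 4K page-size and present bits set.
 */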
int _setup_process_address_space(struct device *dev, dc_mmu_pt mmu)
{
        u32 *map = NULL;
        u32 free, i;
        u32 dynamic_mapping_entries, address;
        dc_mmu_stlb_pt all_stlb;
        int ret = 0;

        dynamic_mapping_entries = MMU_MTLB_ENTRY_NUM;
        mmu->dynamic_mapping_start = 0;
        mmu->page_table_size = dynamic_mapping_entries * MMU_STLB_4K_SIZE;
        mmu->page_table_entries = mmu->page_table_size / sizeof(u32);

        ret = _allocate_memory(mmu->page_table_size, (void **)&mmu->map_logical);
        if (ret) {
                pr_err("Failed to alloc mmu map buffer.\n");
                return ret;
        }

        map = mmu->map_logical;

        /* Initialize free area */
        free = mmu->page_table_entries;
        _write_page_entry(map, (free << 8) | DC_MMU_FREE);
        _write_page_entry(map + 1, ~0U);

        mmu->heap_list = 0;
        mmu->free_nodes = false;

        ret = _allocate_all_stlb(dev, &all_stlb);
        if (ret)
                return ret;

        for (i = 0; i < dynamic_mapping_entries; i++) {
                dc_mmu_stlb_pt stlb;
                dc_mmu_stlb_pt *stlbs = (dc_mmu_stlb_pt *)mmu->stlbs;

                ret = _allocate_stlb(&stlb);
                if (ret)
                        return ret;

                stlb->physBase = all_stlb->physBase + i * MMU_STLB_4K_SIZE;
                stlb->logical = all_stlb->logical + i * MMU_STLB_4K_SIZE / sizeof(u32);

                stlbs[i] = stlb;
        }

        address = (u32)all_stlb->physBase;

        ret = _acquire_mutex(mmu->page_table_mutex, DC_INFINITE);
        if (ret)
                return ret;

        for (i = mmu->dynamic_mapping_start;
             i < mmu->dynamic_mapping_start + dynamic_mapping_entries;
             i++) {
                u32 mtlb_entry;

                mtlb_entry = address
                             | MMU_MTLB_4K_PAGE
                             | MMU_MTLB_PRESENT;
                address += MMU_STLB_4K_SIZE;

                /* Insert the Slave TLB address into the Master TLB entry. */
                _write_page_entry(mmu->mtlb_logical + i, mtlb_entry);
        }

        _release_mutex(mmu->page_table_mutex);

        return 0;
}

/* MMU Construct */
int dc_mmu_construct(struct device *dev, dc_mmu_pt *mmu)
{
        dc_mmu_pt mmu_t = NULL;
        void *mem = NULL;
        void *cookie = NULL, *cookie_safe = NULL;
        dma_addr_t dma_addr, dma_addr_safe;
        u32 size = 0;
        int ret = 0;

        if (mmu_construct)
                return 0;

        mem = kzalloc(sizeof(dc_mmu), GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        mmu_t = (dc_mmu_pt)mem;
        mmu_t->mtlb_bytes = MMU_MTLB_SIZE;
        size = PAGE_ALIGN(mmu_t->mtlb_bytes);

        /* Allocate MTLB */
        cookie = dma_alloc_wc(dev, size, &dma_addr, GFP_KERNEL);
        if (!cookie) {
                dev_err(dev, "Failed to alloc mtlb buffer.\n");
                return -ENOMEM;
        }

        mmu_t->mtlb_logical = cookie;
        mmu_t->mtlb_physical = (u64)dma_addr;
        memset(mmu_t->mtlb_logical, 0, size);

        size = MMU_MTLB_ENTRY_NUM * sizeof(dc_mmu_stlb_pt);
        ret = _allocate_memory(size, &mmu_t->stlbs);
        if (ret)
                return ret;

        ret = _create_mutex(&mmu_t->page_table_mutex);
        if (ret)
                return ret;

        mmu_t->mode = MMU_MODE_1K;

        ret = _setup_process_address_space(dev, mmu_t);
        if (ret)
                return ret;

        /* Allocate safe page */
        cookie_safe = dma_alloc_wc(dev, 4096, &dma_addr_safe, GFP_KERNEL);
        if (!cookie_safe) {
                dev_err(dev, "Failed to alloc safe page.\n");
                return -ENOMEM;
        }

        mmu_t->safe_page_logical = cookie_safe;
        mmu_t->safe_page_physical = (u64)dma_addr_safe;
        /* Clear the whole 4K safe page. */
        memset(mmu_t->safe_page_logical, 0, 4096);

        *mmu = mmu_t;
        mmu_construct = true;

        return 0;
}
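
/*
 * dc_mmu_get_page_entry() - return, through @page_table, a pointer to
 * the STLB entry that maps @address.
 */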
int dc_mmu_get_page_entry(dc_mmu_pt mmu, u32 address, u32 **page_table)
{
        dc_mmu_stlb_pt stlb;
        dc_mmu_stlb_pt *stlbs = (dc_mmu_stlb_pt *)mmu->stlbs;
        u32 mtlb_offset = _mtlb_offset(address);
        u32 stlb_offset = _stlb_offset(address);

        stlb = stlbs[mtlb_offset - mmu->dynamic_mapping_start];
        if (stlb == NULL) {
                pr_err("BUG: invalid stlb, mmu=%p stlbs=%p mtlb_offset=0x%x %s(%d)\n",
                       mmu, stlbs, mtlb_offset, __func__, __LINE__);
                return -ENXIO;
        }

        *page_table = &stlb->logical[stlb_offset];

        return 0;
}

int _link(dc_mmu_pt mmu, u32 index, u32 node)
{
        if (index >= mmu->page_table_entries) {
                mmu->heap_list = node;
        } else {
                u32 *map = mmu->map_logical;

                switch (DC_ENTRY_TYPE(_read_page_entry(&map[index]))) {
                case DC_MMU_SINGLE:
                        /* Previous is a single node, link to it. */
                        _write_page_entry(&map[index], (node << 8) | DC_MMU_SINGLE);
                        break;
                case DC_MMU_FREE:
                        /* Link to a FREE type node. */
                        _write_page_entry(&map[index + 1], node);
                        break;
                default:
                        pr_err("MMU table corrupted at index %u!", index);
                        return -EINVAL;
                }
        }

        return 0;
}
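
/*
 * _add_free() - turn @count entries starting at @node back into a free
 * node (SINGLE for one page, FREE otherwise) and link it from the node
 * at @index, or make it the head of the heap list.
 */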
int _add_free(dc_mmu_pt mmu, u32 index, u32 node, u32 count)
{
        u32 *map = mmu->map_logical;

        if (count == 1) {
                /* Initialize a single page node. */
                _write_page_entry(map + node,
                                  DC_SINGLE_PAGE_NODE_INITIALIZE | DC_MMU_SINGLE);
        } else {
                /* Initialize the FREE node. */
                _write_page_entry(map + node, (count << 8) | DC_MMU_FREE);
                _write_page_entry(map + node + 1, ~0U);
        }

        return _link(mmu, index, node);
}

/*
 * Collect free nodes: walk the whole page table and merge adjacent
 * SINGLE/FREE runs into larger FREE nodes on the heap list.
 */
int _collect(dc_mmu_pt mmu)
{
        u32 *map = mmu->map_logical;
        u32 count = 0, start = 0, i = 0;
        u32 previous = ~0U;
        int ret = 0;

        mmu->heap_list = ~0U;
        mmu->free_nodes = false;

        /* Walk the entire page table. */
        for (i = 0; i < mmu->page_table_entries; i++) {
                switch (DC_ENTRY_TYPE(_read_page_entry(&map[i]))) {
                case DC_MMU_SINGLE:
                        if (count++ == 0) {
                                /* Set new start node. */
                                start = i;
                        }
                        break;
                case DC_MMU_FREE:
                        if (count == 0) {
                                /* Set new start node. */
                                start = i;
                        }
                        count += _read_page_entry(&map[i]) >> 8;
                        /* Advance the index of the page table. */
                        i += (_read_page_entry(&map[i]) >> 8) - 1;
                        break;
                case DC_MMU_USED:
                        /* Met a used node, start to collect. */
                        if (count > 0) {
                                /* Add the free run to the list. */
                                ret = _add_free(mmu, previous, start, count);
                                if (ret)
                                        return ret;
                                /* Remember the previous free node index. */
                                previous = start;
                                count = 0;
                        }
                        break;
                default:
                        pr_err("MMU page table corrupted at index %u!", i);
                        return -EINVAL;
                }
        }

        /* If the last run is still open, add it as well. */
        if (count > 0) {
                ret = _add_free(mmu, previous, start, count);
                if (ret)
                        return ret;
        }

        return 0;
}

int _fill_page_table(u32 *page_table, u32 page_count, u32 entry_value)
{
        u32 i;

        for (i = 0; i < page_count; i++)
                _write_page_entry(page_table + i, entry_value);

        return 0;
}
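
/*
 * dc_mmu_allocate_pages() - reserve @page_count contiguous entries in
 * the page-table map, mark them DC_MMU_USED, and return the resulting
 * MMU virtual address through @address.
 */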
int dc_mmu_allocate_pages(dc_mmu_pt mmu, u32 page_count, u32 *address)
{
        bool got = false, acquired = false;
        u32 *map;
        u32 index = 0, vaddr, left;
        u32 previous = ~0U;
        u32 mtlb_offset, stlb_offset;
        int ret = 0;

        if (page_count == 0 || page_count > mmu->page_table_entries) {
                pr_err("%s has invalid arguments.\n", __func__);
                return -EINVAL;
        }

        _acquire_mutex(mmu->page_table_mutex, DC_INFINITE);
        acquired = true;

        for (map = mmu->map_logical; !got;) {
                for (index = mmu->heap_list;
                     !got && (index < mmu->page_table_entries);) {
                        switch (DC_ENTRY_TYPE(_read_page_entry(&map[index]))) {
                        case DC_MMU_SINGLE:
                                if (page_count == 1) {
                                        got = true;
                                } else {
                                        /* Move to the next node. */
                                        previous = index;
                                        index = _read_page_entry(&map[index]) >> 8;
                                }
                                break;
                        case DC_MMU_FREE:
                                if (page_count <= (_read_page_entry(&map[index]) >> 8)) {
                                        got = true;
                                } else {
                                        /* Move to the next node. */
                                        previous = index;
                                        index = _read_page_entry(&map[index + 1]);
                                }
                                break;
                        default:
                                /* Only SINGLE and FREE nodes are linked. */
                                pr_err("MMU table corrupted at index %u!", index);
                                ret = -EINVAL;
                                goto OnError;
                        }
                }

                /* If we ran out of nodes. */
                if (index >= mmu->page_table_entries) {
                        if (mmu->free_nodes) {
                                /* Collect the free nodes and retry. */
                                ret = _collect(mmu);
                                if (ret)
                                        goto OnError;
                        } else {
                                ret = -ENODATA;
                                goto OnError;
                        }
                }
        }

        switch (DC_ENTRY_TYPE(_read_page_entry(&map[index]))) {
        case DC_MMU_SINGLE:
                /* Unlink the single node from the node list. */
                ret = _link(mmu, previous, _read_page_entry(&map[index]) >> 8);
                if (ret)
                        goto OnError;
                break;
        case DC_MMU_FREE:
                left = (_read_page_entry(&map[index]) >> 8) - page_count;
                switch (left) {
                case 0:
                        /* Unlink the entire FREE type node. */
                        ret = _link(mmu, previous,
                                    _read_page_entry(&map[index + 1]));
                        if (ret)
                                goto OnError;
                        break;
                case 1:
                        /*
                         * Keep map[index] as a single free node and
                         * allocate from the entry after it.
                         */
                        _write_page_entry(&map[index],
                                          (_read_page_entry(&map[index + 1]) << 8) |
                                          DC_MMU_SINGLE);
                        index++;
                        break;
                default:
                        /* A smaller FREE type node is left over. */
                        _write_page_entry(&map[index],
                                          (left << 8) | DC_MMU_FREE);
                        index += left;
                        break;
                }
                break;
        default:
                /* Only SINGLE and FREE nodes are linked. */
                pr_err("MMU table corrupted at index %u!", index);
                ret = -EINVAL;
                goto OnError;
        }

        /* Mark the nodes as used. */
        ret = _fill_page_table(&map[index], page_count, DC_MMU_USED);
        if (ret)
                goto OnError;

        _release_mutex(mmu->page_table_mutex);

        mtlb_offset = index / MMU_STLB_4K_ENTRY_NUM + mmu->dynamic_mapping_start;
        stlb_offset = index % MMU_STLB_4K_ENTRY_NUM;
        vaddr = (mtlb_offset << MMU_MTLB_SHIFT) | (stlb_offset << MMU_STLB_4K_SHIFT);

        if (address != NULL)
                *address = vaddr;

        return 0;

OnError:
        if (acquired)
                _release_mutex(mmu->page_table_mutex);

        return ret;
}
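
/*
 * dc_mmu_free_pages() - return @page_count entries starting at
 * @address to the page-table map as a SINGLE or FREE node; the freed
 * run is re-linked lazily by _collect() on a later allocation.
 */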
int dc_mmu_free_pages(dc_mmu_pt mmu, u32 address, u32 page_count)
{
        u32 *node;

        if (page_count == 0)
                return -EINVAL;

        node = mmu->map_logical + _address_to_index(mmu, address);

        _acquire_mutex(mmu->page_table_mutex, DC_INFINITE);

        if (page_count == 1) {
                /* Mark the single page node free. */
                _write_page_entry(node,
                                  DC_SINGLE_PAGE_NODE_INITIALIZE | DC_MMU_SINGLE);
        } else {
                /* Mark the FREE type node free. */
                _write_page_entry(node, (page_count << 8) | DC_MMU_FREE);
                _write_page_entry(node + 1, ~0U);
        }

        mmu->free_nodes = true;

        _release_mutex(mmu->page_table_mutex);

        return 0;
}
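
/*
 * dc_mmu_set_page() - write a writable, present STLB entry for the
 * 4K-aligned physical address @page_address into @page_entry.
 */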
int dc_mmu_set_page(dc_mmu_pt mmu, u64 page_address, u32 *page_entry)
{
        u32 address_ext;
        u32 address;

        if (page_entry == NULL || (page_address & 0xFFF))
                return -EINVAL;

        /* [31:0]. */
        address = (u32)(page_address & 0xFFFFFFFF);
        /* [39:32]. */
        address_ext = (u32)((page_address >> 32) & 0xFF);

        _write_page_entry(page_entry, _set_page(address, address_ext, true));

        return 0;
}
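
/*
 * dc_mmu_map_memory() - allocate MMU virtual space for @page_count
 * pages and map it to @physical, which is either the base of a
 * physically contiguous region (@continuous) or a struct page array.
 * @security sets the security and exception bits on the covered MTLB
 * entries, or clears the security bit when false.
 */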
int dc_mmu_map_memory(dc_mmu_pt mmu, u64 physical, u32 page_count,
                      u32 *address, bool continuous, bool security)
{
        u32 virtual_address, i = 0;
        u32 mtlb_num, mtlb_entry, mtlb_offset;
        bool allocated = false;
        int ret = 0;

        ret = dc_mmu_allocate_pages(mmu, page_count, &virtual_address);
        if (ret)
                goto OnError;

        *address = virtual_address;
        allocated = true;

        /* Fill the mtlb security bit. */
        mtlb_num = _mtlb_offset(virtual_address + page_count * MMU_PAGE_4K_SIZE - 1) -
                   _mtlb_offset(virtual_address) + 1;
        mtlb_offset = _mtlb_offset(virtual_address);

        for (i = 0; i < mtlb_num; i++) {
                mtlb_entry = mmu->mtlb_logical[mtlb_offset + i];
                if (security) {
                        mtlb_entry = mtlb_entry
                                     | MMU_MTLB_SECURITY
                                     | MMU_MTLB_EXCEPTION;
                        _write_page_entry(&mmu->mtlb_logical[mtlb_offset + i],
                                          mtlb_entry);
                } else {
                        mtlb_entry = mtlb_entry & (~MMU_MTLB_SECURITY);
                        _write_page_entry(&mmu->mtlb_logical[mtlb_offset + i],
                                          mtlb_entry);
                }
        }

        /* Fill in the page table. */
        for (i = 0; i < page_count; i++) {
                u64 page_phy;
                u32 *page_entry;
                struct page **pages;

                if (continuous == true) {
                        page_phy = physical + i * MMU_PAGE_4K_SIZE;
                } else {
                        pages = (struct page **)physical;
                        page_phy = page_to_phys(pages[i]);
                }

                /* Keep virtual_address as the base so the error path frees it. */
                ret = dc_mmu_get_page_entry(mmu,
                                            virtual_address + i * MMU_PAGE_4K_SIZE,
                                            &page_entry);
                if (ret)
                        goto OnError;

                /* Write the page address to the page entry. */
                ret = dc_mmu_set_page(mmu, page_phy, page_entry);
                if (ret)
                        goto OnError;
        }

        return 0;

OnError:
        if (allocated)
                dc_mmu_free_pages(mmu, virtual_address, page_count);

        pr_info("%s fail!\n", __func__);

        return ret;
}

int dc_mmu_unmap_memory(dc_mmu_pt mmu, u32 gpu_address, u32 page_count)
{
        return dc_mmu_free_pages(mmu, gpu_address, page_count);
}