img_mem_unified.c

/*!
 *****************************************************************************
 *
 * @File img_mem_unified.c
 * ---------------------------------------------------------------------------
 *
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/version.h>
#ifdef CONFIG_X86
#include <asm/cacheflush.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
#include <linux/set_memory.h>
#endif
#endif
#include <img_mem_man.h>
#include "img_mem_man_priv.h"

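/* Debug switches: left at 0 by default; when building with debug they can be
 * set to a non-zero value to trace physical page allocations and user-space
 * mmap faults respectively (see unified_alloc and unified_mmap_fault). */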
static int trace_physical_pages;
static int trace_mmap_fault;

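/* Heap-private data attached to each buffer via buffer->priv */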
struct buffer_data {
	struct sg_table *sgt;
	enum img_mem_attr mattr; /* memory attributes */
	enum dma_data_direction dma_dir;
	struct vm_area_struct *mapped_vma;
	/* exporter via dmabuf */
	struct dma_buf *dma_buf;
	bool exported;
};

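/* On x86 the kernel linear mapping of the page has to be switched to the
 * requested cache attribute; on other architectures this is a no-op and the
 * attribute is applied only through the pgprot of the actual mappings. */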
static void set_page_cache(struct page *page,
		enum img_mem_attr attr)
{
#ifdef CONFIG_X86
	if (attr & IMG_MEM_ATTR_UNCACHED)
		set_memory_uc((unsigned long)page_address(page), 1);
	else if (attr & IMG_MEM_ATTR_WRITECOMBINE)
		set_memory_wc((unsigned long)page_address(page), 1);
	else if (attr & IMG_MEM_ATTR_CACHED)
		set_memory_wb((unsigned long)page_address(page), 1);
#endif
}

/*
 * dmabuf wrapper ops
 */
static struct sg_table *unified_map_dmabuf(struct dma_buf_attachment *attach,
		enum dma_data_direction dir)
{
	struct buffer *buffer = attach->dmabuf->priv;
	struct buffer_data *buffer_data;
	struct sg_table *sgt;
	struct scatterlist *src, *dst;
	int ret, i;

	if (!buffer)
		return NULL;
	pr_debug("%s:%d client:%p buffer %d (0x%p)\n", __func__, __LINE__,
		attach->dev, buffer->id, buffer);
	buffer_data = buffer->priv;
	/* Copy sgt so that we make an independent mapping */
	sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (sgt == NULL)
		return NULL;
	ret = sg_alloc_table(sgt, buffer_data->sgt->orig_nents, GFP_KERNEL);
	if (ret)
		goto err_free;
	src = buffer_data->sgt->sgl;
	dst = sgt->sgl;
	for (i = 0; i < buffer_data->sgt->orig_nents; ++i) {
		sg_set_page(dst, sg_page(src), src->length, src->offset);
		dst = sg_next(dst);
		src = sg_next(src);
	}
	ret = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("%s dma_map_sg failed!\n", __func__);
		goto err_free_sgt;
	}
	sgt->nents = ret;
	return sgt;

err_free_sgt:
	sg_free_table(sgt);
err_free:
	kfree(sgt);
	return NULL;
}

static void unified_unmap_dmabuf(struct dma_buf_attachment *attach,
		struct sg_table *sgt,
		enum dma_data_direction dir)
{
	struct buffer *buffer = attach->dmabuf->priv;

	pr_debug("%s:%d client:%p buffer %d (0x%p)\n", __func__, __LINE__,
		attach->dev, buffer ? buffer->id : -1, buffer);
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
}

/* Called when the dma-buf ref counter reaches zero! */
static void unified_release_dmabuf(struct dma_buf *buf)
{
	struct buffer *buffer = buf->priv;
	struct buffer_data *buffer_data;

	if (!buffer)
		return;
	buffer_data = buffer->priv;
	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	if (!buffer_data)
		return;
	buffer_data->exported = false;
}

static void unified_dma_map(struct buffer *buffer);
static void unified_dma_unmap(struct buffer *buffer);

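/* begin_cpu_access creates the dma mapping for the requested direction and
 * invalidates the CPU view of the buffer; end_cpu_access flushes it back to
 * the device and drops the mapping again. */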
static int unified_begin_cpu_access_dmabuf(struct dma_buf *buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
		size_t start, size_t len,
#endif
		enum dma_data_direction direction)
{
	struct buffer *buffer = buf->priv;
	struct buffer_data *buffer_data;
	struct sg_table *sgt;

	if (!buffer) {
		/* Buffer may have been released, exit silently */
		return 0;
	}
	buffer_data = buffer->priv;
	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	buffer_data->dma_dir = direction;
	unified_dma_map(buffer);
	sgt = buffer_data->sgt;
	dma_sync_sg_for_cpu(buffer->device, sgt->sgl, sgt->orig_nents,
		direction);
	return 0;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
static void unified_end_cpu_access_dmabuf(struct dma_buf *buf,
		size_t start, size_t len,
		enum dma_data_direction direction)
#else
static int unified_end_cpu_access_dmabuf(struct dma_buf *buf,
		enum dma_data_direction direction)
#endif
{
	struct buffer *buffer = buf->priv;
	struct buffer_data *buffer_data;
	struct sg_table *sgt;

	if (!buffer) {
		/* Buffer may have been released, exit silently */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
		return;
#else
		return 0;
#endif
	}
	buffer_data = buffer->priv;
	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	sgt = buffer_data->sgt;
	dma_sync_sg_for_device(buffer->device, sgt->sgl, sgt->orig_nents,
		direction);
	unified_dma_unmap(buffer);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
	return 0;
#endif
}

/* Called on file descriptor mmap */
static int unified_mmap_dmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct buffer *buffer = buf->priv;
	struct buffer_data *buffer_data;
	struct scatterlist *sgl;
	unsigned long addr;

	if (!buffer)
		return -EINVAL;
	buffer_data = buffer->priv;
	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %#lx\n",
		__func__, __LINE__,
		vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	sgl = buffer_data->sgt->sgl;
	addr = vma->vm_start;
	while (sgl) {
		dma_addr_t phys = sg_phys(sgl);
		unsigned long pfn = phys >> PAGE_SHIFT;
		unsigned int len = sgl->length;
		int ret;

		if (vma->vm_end < (addr + len)) {
			unsigned long size = vma->vm_end - addr;

			pr_debug("%s:%d buffer %d (0x%p) truncating len=%#x to size=%#lx\n",
				__func__, __LINE__,
				buffer->id, buffer, len, size);
			WARN(round_up(size, PAGE_SIZE) != size,
				"VMA size %#lx not page aligned\n", size);
			len = size;
			if (!len) /* VM space is smaller than allocation */
				break;
		}
		ret = remap_pfn_range(vma, addr, pfn, len, vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		sgl = sg_next(sgl);
	}
	return 0;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
static void *unified_kmap_dmabuf(struct dma_buf *buf, unsigned long page)
{
	pr_err("%s not supported\n", __func__);
	return NULL;
}
#endif

static int unified_map_km(struct heap *heap, struct buffer *buffer);
static int unified_unmap_km(struct heap *heap, struct buffer *buffer);

static void *unified_vmap_dmabuf(struct dma_buf *buf)
{
	struct buffer *buffer = buf->priv;
	struct heap *heap;

	if (!buffer)
		return NULL;
	heap = buffer->heap;
	if (unified_map_km(heap, buffer))
		return NULL;
	pr_debug("%s:%d buffer %d kptr 0x%p\n", __func__, __LINE__,
		buffer->id, buffer->kptr);
	return buffer->kptr;
}

static void unified_vunmap_dmabuf(struct dma_buf *buf, void *kptr)
{
	struct buffer *buffer = buf->priv;
	struct heap *heap;

	if (!buffer)
		return;
	heap = buffer->heap;
	pr_debug("%s:%d buffer %d kptr 0x%p (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer->kptr, kptr);
	if (buffer->kptr == kptr)
		unified_unmap_km(heap, buffer);
}

static const struct dma_buf_ops unified_dmabuf_ops = {
	.map_dma_buf = unified_map_dmabuf,
	.unmap_dma_buf = unified_unmap_dmabuf,
	.release = unified_release_dmabuf,
	.begin_cpu_access = unified_begin_cpu_access_dmabuf,
	.end_cpu_access = unified_end_cpu_access_dmabuf,
	.mmap = unified_mmap_dmabuf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
	.kmap_atomic = unified_kmap_dmabuf,
	.kmap = unified_kmap_dmabuf,
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
	.map_atomic = unified_kmap_dmabuf,
	.map = unified_kmap_dmabuf,
#endif
#endif
	.vmap = unified_vmap_dmabuf,
	.vunmap = unified_vunmap_dmabuf,
};

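/* Wrap an already allocated buffer into a dma-buf and return its file
 * descriptor through *buf_hnd as the buffer handle. */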
static int unified_export(struct device *device, struct heap *heap,
		size_t size, enum img_mem_attr attr,
		struct buffer *buffer, uint64_t *buf_hnd)
{
	struct buffer_data *buffer_data = buffer->priv;
	struct dma_buf *dma_buf;
	int ret, fd;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
#endif

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	if (!buffer_data)
		/* Nothing to export ? */
		return -ENOMEM;
	if (buffer_data->exported) {
		pr_err("%s: already exported!\n", __func__);
		return -EBUSY;
	}
	/* The dma_buf ops read the struct buffer back from dmabuf->priv */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
	dma_buf = dma_buf_export(buffer, &unified_dmabuf_ops,
		size, O_RDWR);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
	dma_buf = dma_buf_export(buffer, &unified_dmabuf_ops,
		size, O_RDWR, NULL);
#else
	exp_info.ops = &unified_dmabuf_ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;
	exp_info.resv = NULL;
	dma_buf = dma_buf_export(&exp_info);
#endif
	if (IS_ERR(dma_buf)) {
		pr_err("%s:dma_buf_export failed\n", __func__);
		ret = PTR_ERR(dma_buf);
		return ret;
	}
	get_dma_buf(dma_buf);
	fd = dma_buf_fd(dma_buf, 0);
	if (fd < 0) {
		pr_err("%s: dma_buf_fd failed\n", __func__);
		dma_buf_put(dma_buf);
		return -EFAULT;
	}
	buffer_data->dma_buf = dma_buf;
	buffer_data->exported = true;
	*buf_hnd = (uint64_t)fd;
	return 0;
}

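/* Allocate a buffer as a list of pages: try the largest order chunks first,
 * fall back to smaller orders as contiguous memory runs out, then split
 * everything back to order-0 pages and describe them with an sg_table stored
 * in the buffer private data. */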
static int unified_alloc(struct device *device, struct heap *heap,
		size_t size, enum img_mem_attr attr,
		struct buffer *buffer)
{
	struct buffer_data *buffer_data;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct page *page, *tmp_page;
	struct list_head pages_list;
	int pages = 0;
	int ret;
	int min_order = heap->options.unified.min_order;
	int max_order = heap->options.unified.max_order;

	if (min_order == 0)
		min_order = IMG_MIN_ALLOC_ORDER_DEFAULT;
	if (max_order == 0)
		max_order = IMG_MAX_ALLOC_ORDER_DEFAULT;
	pr_debug("%s:%d buffer %d (0x%p) size:%zu attr:%x\n", __func__, __LINE__,
		buffer->id, buffer, size, attr);
	/* Allocations for MMU pages are still 4k so CPU page size is enough */
	if (attr & IMG_MEM_ATTR_MMU)
		min_order = get_order(size);
	if (min_order > max_order) {
		pr_err("min_alloc_order > max_alloc_order !\n");
		return -EINVAL;
	}
	INIT_LIST_HEAD(&pages_list);

	while ((long)size > 0) {
		int order;

		page = NULL;
		/* Fit the buffer size starting from the biggest order.
		 * When the system has run out of chunks of a specific order,
		 * retry with the lowest order still satisfying the min_order
		 * constraint. */
		for (order = max_order; order >= min_order; order--) {
			int page_order;

			/* Try to allocate the min_order size */
			if (size < (PAGE_SIZE << order) && (order > min_order))
				continue;
			page = alloc_pages(heap->options.unified.gfp_type |
				__GFP_COMP | __GFP_NOWARN, order);
			if (!page)
				continue;
			page_order = compound_order(page);
			if (trace_physical_pages)
				pr_info("%s:%d phys %#llx size %lu page_address %p order:%d\n",
					__func__, __LINE__,
					(unsigned long long)page_to_phys(page),
					PAGE_SIZE << page_order, page_address(page), page_order);
			/* The code below is just a sanity check that the
			 * DMA streaming API is going to work with this device */
			if (!(attr & IMG_MEM_ATTR_UNCACHED)) {
				/*
				 * dma_map_page() is probably going to fail if
				 * the alloc flags are GFP_HIGHMEM, since such
				 * memory is not mapped to the CPU. Hopefully,
				 * this will never happen, because memory of
				 * this sort cannot be used for DMA anyway.
				 * To check if this is the case, build with
				 * debug, set trace_physical_pages=1 and check
				 * if the page_address printed above is NULL.
				 */
				dma_addr_t dma_addr = dma_map_page(device,
					page, 0, PAGE_SIZE << page_order, DMA_BIDIRECTIONAL);
				if (dma_mapping_error(device, dma_addr)) {
					__free_pages(page, page_order);
					pr_err("%s dma_map_page failed!\n", __func__);
					ret = -EIO;
					goto alloc_pages_failed;
				}
				dma_unmap_page(device, dma_addr,
					PAGE_SIZE << page_order, DMA_BIDIRECTIONAL);
			}
			/* Record the max order taking the info
			 * from the page we have just found */
			max_order = page_order;
			break;
		}
		if (!page) {
			pr_err("%s alloc_pages failed!\n", __func__);
			ret = -ENOMEM;
			goto alloc_pages_failed;
		}
		size -= PAGE_SIZE << max_order;
		/* Split pages back to order 0 ->
		 * this is required to properly map them into UM */
		if (max_order) {
			struct page *end = page + (1 << max_order);

			split_page(page, max_order);
			while (page < end) {
				list_add_tail(&page->lru, &pages_list);
				pages++;
				/* There should not be any mapping attached to
				 * the page at this point, but clear it just
				 * for sanity. This is a workaround for
				 * kernel 4.15 & split pages. */
				page->mapping = NULL;
				page++;
			}
		} else {
			list_add_tail(&page->lru, &pages_list);
			pages++;
		}
	}

	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto alloc_pages_failed;
	}
	ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
	if (ret)
		goto sg_alloc_table_failed;
	sgl = sgt->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages_list, lru) {
		sg_set_page(sgl, page, PAGE_SIZE, 0);
		set_page_cache(page, attr);
		sgl = sg_next(sgl);
		list_del(&page->lru);
	}
	pr_debug("%s:%d buffer %d orig_nents %d\n", __func__, __LINE__,
		buffer->id, sgt->orig_nents);
	buffer_data = kzalloc(sizeof(struct buffer_data), GFP_KERNEL);
	if (!buffer_data) {
		ret = -ENOMEM;
		goto alloc_buffer_data_failed;
	}
	buffer->priv = buffer_data;
	buffer_data->sgt = sgt;
	buffer_data->mattr = attr;
	buffer_data->dma_dir = DMA_NONE;
	buffer_data->mapped_vma = NULL;
	return 0;

alloc_buffer_data_failed:
	sg_free_table(sgt);
sg_alloc_table_failed:
	kfree(sgt);
alloc_pages_failed:
	list_for_each_entry_safe(page, tmp_page, &pages_list, lru) {
		set_page_cache(page, IMG_MEM_ATTR_CACHED);
		__free_page(page);
	}
	return ret;
}

static void unified_dma_map(struct buffer *buffer)
{
	struct buffer_data *buffer_data = buffer->priv;
	struct sg_table *sgt = buffer_data->sgt;
	int ret = 0;

	if (buffer_data->dma_dir == DMA_NONE)
		buffer_data->dma_dir = DMA_BIDIRECTIONAL;
	ret = dma_map_sg(buffer->device, sgt->sgl, sgt->orig_nents,
		buffer_data->dma_dir);
	if (ret <= 0) {
		pr_err("%s dma_map_sg failed!\n", __func__);
		buffer_data->dma_dir = DMA_NONE;
		return;
	}
	pr_debug("%s:%d buffer %d orig_nents %d nents %d\n", __func__, __LINE__,
		buffer->id, sgt->orig_nents, ret);
	sgt->nents = ret;
}

static void unified_dma_unmap(struct buffer *buffer)
{
	struct buffer_data *buffer_data = buffer->priv;
	struct sg_table *sgt = buffer_data->sgt;

	if (buffer_data->dma_dir == DMA_NONE)
		return;
	dma_unmap_sg(buffer->device, sgt->sgl,
		sgt->orig_nents, buffer_data->dma_dir);
	buffer_data->dma_dir = DMA_NONE;
	pr_debug("%s:%d buffer %d orig_nents %d\n", __func__, __LINE__,
		buffer->id, sgt->orig_nents);
}

static void unified_free(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *buffer_data = buffer->priv;
	struct sg_table *sgt = buffer_data->sgt;
	struct scatterlist *sgl;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	/* If the user forgot to unmap, free the dma mapping anyway */
	unified_dma_unmap(buffer);
	if (buffer_data->dma_buf) {
		/* Clear the back pointer before dropping our reference */
		buffer_data->dma_buf->priv = NULL;
		dma_buf_put(buffer_data->dma_buf);
	}
	if (buffer->kptr) {
		pr_debug("%s vunmap 0x%p\n", __func__, buffer->kptr);
		vunmap(buffer->kptr);
	}
	if (buffer_data->mapped_vma)
		buffer_data->mapped_vma->vm_private_data = NULL;
	sgl = sgt->sgl;
	while (sgl) {
		struct page *page = sg_page(sgl);

		if (page) {
			set_page_cache(page, IMG_MEM_ATTR_CACHED);
			__free_page(page);
		}
		sgl = sg_next(sgl);
	}
	sg_free_table(sgt);
	kfree(sgt);
	kfree(buffer_data);
}

static void unified_mmap_open(struct vm_area_struct *vma)
{
	struct buffer *buffer = vma->vm_private_data;
	struct buffer_data *buffer_data = buffer->priv;
	struct sg_table *sgt = buffer_data->sgt;

	buffer_data->mapped_vma = vma;
	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
		__func__, __LINE__, buffer->id, buffer, vma);
	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)) {
		if (vma->vm_flags & VM_WRITE)
			buffer_data->dma_dir = DMA_TO_DEVICE;
		else
			buffer_data->dma_dir = DMA_FROM_DEVICE;
		unified_dma_map(buffer);
		/* User will read the buffer so invalidate D-cache */
		if (buffer_data->dma_dir == DMA_FROM_DEVICE)
			dma_sync_sg_for_cpu(buffer->device,
				sgt->sgl,
				sgt->orig_nents,
				DMA_FROM_DEVICE);
	}
}

static void unified_mmap_close(struct vm_area_struct *vma)
{
	struct buffer *buffer = vma->vm_private_data;
	struct buffer_data *buffer_data;
	struct sg_table *sgt;

	if (!buffer)
		return;
	buffer_data = buffer->priv;
	sgt = buffer_data->sgt;
	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
		__func__, __LINE__, buffer->id, buffer, vma);
	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)) {
		/* User may have written to the buffer so flush D-cache */
		if (buffer_data->dma_dir == DMA_TO_DEVICE) {
			dma_sync_sg_for_device(buffer->device,
				sgt->sgl,
				sgt->orig_nents,
				DMA_TO_DEVICE);
			dma_sync_sg_for_cpu(buffer->device,
				sgt->sgl,
				sgt->orig_nents,
				DMA_FROM_DEVICE);
		}
		unified_dma_unmap(buffer);
	}
	buffer_data->mapped_vma = NULL;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
static vm_fault_t unified_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
#else
static int unified_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
	struct buffer *buffer = vma->vm_private_data;
	struct buffer_data *buffer_data = buffer->priv;
	struct sg_table *sgt = buffer_data->sgt;
	struct scatterlist *sgl;
	struct page *page = NULL;
	int err;
	unsigned long addr;

	if (trace_mmap_fault) {
		pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
			__func__, __LINE__, buffer->id, buffer, vma);
		pr_debug("%s:%d vm_start %#lx vm_end %#lx total size %ld\n",
			__func__, __LINE__,
			vma->vm_start, vma->vm_end,
			vma->vm_end - vma->vm_start);
	}
	sgl = sgt->sgl;
	addr = vma->vm_start;
	while (sgl && addr < vma->vm_end) {
		page = sg_page(sgl);
		if (!page) {
			pr_err("%s:%d no page!\n", __func__, __LINE__);
			return VM_FAULT_SIGBUS;
		}
		if (trace_mmap_fault)
			pr_info("%s:%d vmf addr %lx page_address:%p phys:%#llx\n",
				__func__, __LINE__, addr, page,
				(unsigned long long)page_to_phys(page));
		err = vm_insert_page(vma, addr, page);
		switch (err) {
		case 0:
		case -EAGAIN:
		case -ERESTARTSYS:
		case -EINTR:
		case -EBUSY:
			break; /* passthrough */
		case -ENOMEM:
			return VM_FAULT_OOM;
		default:
			return VM_FAULT_SIGBUS;
		}
		addr += sgl->length;
		sgl = sg_next(sgl);
	}
	return VM_FAULT_NOPAGE;
}

/* The vma ops->fault handler is used to track user space mappings
 * (inspired by other gpu/drm drivers in the kernel source tree),
 * so that the dma_sync_* ops can be called when the mapping is destroyed
 * (when the user calls the munmap syscall).
 * The vma flags are used to choose the correct dma mapping direction;
 * kernel-space-only mappings default to DMA_BIDIRECTIONAL.
 * This allows automatic cache flushing/invalidation to be done.
 *
 * Examples:
 * mmap() -> .open -> invalidate buffer cache
 * .. read content from buffer
 * unmap() -> .close -> do nothing
 *
 * mmap() -> .open -> do nothing
 * .. write content to buffer
 * unmap() -> .close -> flush buffer cache
 */
static struct vm_operations_struct unified_mmap_vm_ops = {
	.open = unified_mmap_open,
	.close = unified_mmap_close,
	.fault = unified_mmap_fault,
};

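/* Map a buffer into user space: pages are not inserted here but faulted in
 * on demand by unified_mmap_fault (VM_MIXEDMAP), while .open/.close handle
 * the cache maintenance for cached mappings. */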
static int unified_map_um(struct heap *heap, struct buffer *buffer,
		struct vm_area_struct *vma)
{
	struct buffer_data *buffer_data = buffer->priv;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %ld\n",
		__func__, __LINE__,
		vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);
	/* Warn when attempting to create a dma mapping
	 * while one already exists */
	WARN_ON(buffer_data->dma_dir != DMA_NONE);
	/* CACHED by default */
	if (buffer_data->mattr & IMG_MEM_ATTR_WRITECOMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &unified_mmap_vm_ops;
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_private_data = buffer;
	vma->vm_pgoff = 0;
	unified_mmap_open(vma);
	return 0;
}

static int unified_map_km(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *buffer_data = buffer->priv;
	struct sg_table *sgt = buffer_data->sgt;
	struct scatterlist *sgl = sgt->sgl;
	unsigned int num_pages = sg_nents(sgl);
	struct page **pages;
	pgprot_t prot;
	int i;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	if (buffer->kptr) {
		pr_warn("%s called for already mapped buffer %d\n",
			__func__, buffer->id);
		return 0;
	}
	/*
	 * Use vmalloc to avoid the kmalloc limit of ~4MB per allocation:
	 * 4194304 bytes / sizeof(struct page *) = 524288 page pointers,
	 * so with kmalloc the largest buffer that could be mapped would be
	 * about 524288 x 4k pages = ~2.1GB.
	 */
	pages = vmalloc(num_pages * sizeof(struct page *));
	if (!pages) {
		pr_err("%s failed to allocate memory for pages\n", __func__);
		return -ENOMEM;
	}
	prot = PAGE_KERNEL;
	/* CACHED by default */
	if (buffer_data->mattr & IMG_MEM_ATTR_WRITECOMBINE)
		prot = pgprot_writecombine(prot);
	else if (buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)
		prot = pgprot_noncached(prot);
	/* Create the dma mapping before mapping into the kernel */
	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED))
		unified_dma_map(buffer);
	i = 0;
	while (sgl) {
		pages[i++] = sg_page(sgl);
		sgl = sg_next(sgl);
	}
	buffer->kptr = vmap(pages, num_pages, VM_MAP, prot);
	vfree(pages);
	if (!buffer->kptr) {
		pr_err("%s vmap failed!\n", __func__);
		return -EFAULT;
	}
	pr_debug("%s:%d buffer %d vmap to 0x%p\n", __func__, __LINE__,
		buffer->id, buffer->kptr);
	return 0;
}

static int unified_unmap_km(struct heap *heap, struct buffer *buffer)
{
	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	if (!buffer->kptr) {
		pr_warn("%s called for already unmapped buffer %d\n",
			__func__, buffer->id);
		return -EFAULT;
	}
	unified_dma_unmap(buffer);
	pr_debug("%s vunmap 0x%p\n", __func__, buffer->kptr);
	vunmap(buffer->kptr);
	buffer->kptr = NULL;
	return 0;
}

static int unified_get_sg_table(struct heap *heap, struct buffer *buffer,
		struct sg_table **sg_table, bool *use_sg_dma)
{
	struct buffer_data *buffer_data = buffer->priv;

	if (!buffer_data)
		return -EINVAL;
	*sg_table = buffer_data->sgt;
	*use_sg_dma = false;
	return 0;
}

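/* Explicit cache maintenance entry points (only meaningful for cached buffers
 * that currently have a dma mapping): cpu_to_dev flushes CPU writes and then
 * invalidates; dev_to_cpu just invalidates before the CPU reads. */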
static void unified_sync_cpu_to_dev(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *buffer_data = buffer->priv;
	struct sg_table *sgt = buffer_data->sgt;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED) &&
			buffer_data->dma_dir != DMA_NONE) {
		dma_sync_sg_for_device(buffer->device,
			sgt->sgl,
			sgt->orig_nents,
			DMA_TO_DEVICE);
		dma_sync_sg_for_cpu(buffer->device,
			sgt->sgl,
			sgt->orig_nents,
			DMA_FROM_DEVICE);
	}
}

static void unified_sync_dev_to_cpu(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *buffer_data = buffer->priv;
	struct sg_table *sgt = buffer_data->sgt;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		buffer->id, buffer);
	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED) &&
			buffer_data->dma_dir != DMA_NONE)
		dma_sync_sg_for_cpu(buffer->device,
			sgt->sgl,
			sgt->orig_nents,
			DMA_FROM_DEVICE);
}

static void unified_heap_destroy(struct heap *heap)
{
	pr_debug("%s:%d\n", __func__, __LINE__);
}

static struct heap_ops unified_heap_ops = {
	.export = unified_export,
	.alloc = unified_alloc,
	.import = NULL,
	.free = unified_free,
	.map_um = unified_map_um,
	.unmap_um = NULL, /* we are using vma ops to detect the unmap event */
	.map_km = unified_map_km,
	.unmap_km = unified_unmap_km,
	.get_sg_table = unified_get_sg_table,
	.get_page_array = NULL,
	.sync_cpu_to_dev = unified_sync_cpu_to_dev,
	.sync_dev_to_cpu = unified_sync_dev_to_cpu,
	.set_offset = NULL,
	.destroy = unified_heap_destroy,
};

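/* Heap entry point: install the unified heap callbacks. */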
int img_mem_unified_init(const struct heap_config *heap_cfg, struct heap *heap)
{
	pr_debug("%s:%d\n", __func__, __LINE__);
	heap->ops = &unified_heap_ops;
	return 0;
}

/*
 * coding style for emacs
 *
 * Local variables:
 * indent-tabs-mode: t
 * tab-width: 8
 * c-basic-offset: 8
 * End:
 */