/*!
 *****************************************************************************
 *
 * @File       img_mem_dmabuf.c
 * ---------------------------------------------------------------------------
 *
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>

#include <img_mem_man.h>
#include "img_mem_man_priv.h"

/* this condition is actually true for kernels < 4.4.100 */
#ifndef PHYS_PFN
#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
#endif

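/* Debug switches: when set to a non-zero value they make the code below log,
 * respectively, the physical pages of an imported buffer and every user-space
 * page fault. They are plain static ints and are not exposed as module
 * parameters in this file. */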
static int trace_physical_pages;
static int trace_mmap_fault;

struct buffer_data {
	struct dma_buf *dma_buf;           /* imported buffer */
	struct dma_buf_attachment *attach; /* attachment of the importing device */
	struct sg_table *sgt;              /* mapped attachment */
	enum img_mem_attr mattr;           /* memory attributes */
	struct vm_area_struct *mapped_vma; /* current user space mapping, if any */
};

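/* Import an existing dma-buf, identified by the file descriptor passed in
 * buf_hnd, into this heap: take a reference with dma_buf_get(), attach the
 * importing device and map the attachment to obtain the sg_table used for
 * all later device and CPU mappings. */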
static int dmabuf_heap_import(struct device *device, struct heap *heap,
			      size_t size, enum img_mem_attr attr, uint64_t buf_hnd,
			      struct buffer *buffer)
{
	struct buffer_data *data;
	int ret;
	int buf_fd = (int)buf_hnd;

	pr_debug("%s:%d buffer %d (0x%p) buf_fd %d\n", __func__, __LINE__,
		 buffer->id, buffer, buf_fd);

	data = kmalloc(sizeof(struct buffer_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dma_buf = dma_buf_get(buf_fd);
	if (IS_ERR_OR_NULL(data->dma_buf)) {
		pr_err("%s dma_buf_get fd %d\n", __func__, buf_fd);
		ret = -EINVAL;
		goto dma_buf_get_failed;
	}
	pr_debug("%s:%d buffer %d dma_buf %p\n", __func__, __LINE__,
		 buffer->id, data->dma_buf);

	data->attach = dma_buf_attach(data->dma_buf, device);
	if (IS_ERR(data->attach)) {
		pr_err("%s dma_buf_attach fd %d\n", __func__, buf_fd);
		ret = -EINVAL;
		goto dma_buf_attach_failed;
	}

	data->sgt = dma_buf_map_attachment(data->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(data->sgt)) {
		pr_err("%s dma_buf_map_attachment fd %d\n", __func__, buf_fd);
		ret = -EINVAL;
		goto dma_buf_map_failed;
	}

	if (trace_physical_pages) {
		struct scatterlist *sgl = data->sgt->sgl;

		while (sgl) {
			pr_info("%s:%d phys %#llx length %d (dma_addr:%#llx len:%d)\n",
				__func__, __LINE__,
				(unsigned long long)sg_phys(sgl), sgl->length,
				sg_dma_address(sgl), sg_dma_len(sgl));
			sgl = sg_next(sgl);
		}
	}

	data->mattr = attr;
	data->mapped_vma = NULL;
	buffer->priv = data;
	return 0;

dma_buf_map_failed:
	dma_buf_detach(data->dma_buf, data->attach);
dma_buf_attach_failed:
	dma_buf_put(data->dma_buf);
dma_buf_get_failed:
	kfree(data);
	return ret;
}

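/* Release an imported buffer: drop the kernel mapping if one is still held,
 * detach any live user-space vma from the buffer, then unmap the attachment,
 * detach and drop the dma-buf reference taken at import time. */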
static void dmabuf_heap_free(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *data = buffer->priv;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);

	if (buffer->kptr) {
		struct dma_buf *dma_buf = data->dma_buf;

		dma_buf_end_cpu_access(dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
				       0 /* start */,
				       buffer->actual_size,
#endif
				       DMA_BIDIRECTIONAL);
		dma_buf_vunmap(dma_buf, buffer->kptr);
		buffer->kptr = NULL;
	}

	/* Detach any live user space mapping so the vma ops below stop
	 * referencing this buffer once it is freed */
	if (data->mapped_vma)
		data->mapped_vma->vm_private_data = NULL;

	dma_buf_unmap_attachment(data->attach, data->sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(data->dma_buf, data->attach);
	dma_buf_put(data->dma_buf);
	kfree(data);
}

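/* Start of a user-space mapping's lifetime: invoked explicitly from
 * dmabuf_heap_map_um() and by the kernel when the vma is duplicated.
 * Performs the begin_cpu_access half of the cache maintenance described
 * above dmabuf_heap_mmap_vm_ops and remembers the vma. */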
static void dmabuf_mmap_open(struct vm_area_struct *vma)
{
	struct buffer *buffer = vma->vm_private_data;
	struct buffer_data *data = buffer->priv;

	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
		 __func__, __LINE__, buffer->id, buffer, vma);

	if (!(data->mattr & IMG_MEM_ATTR_UNCACHED)) {
		enum dma_data_direction dma_dir;

		if (vma->vm_flags & VM_WRITE)
			dma_dir = DMA_TO_DEVICE;
		else
			dma_dir = DMA_FROM_DEVICE;

		/* Sync according to how user space will access the mapping:
		 * invalidate the D-cache for a read-only mapping, prepare for
		 * CPU writes (flushed in .close) for a writable one */
		dma_buf_begin_cpu_access(data->dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
					 0 /* start */,
					 buffer->actual_size,
#endif
					 dma_dir);
	}
	data->mapped_vma = vma;
}

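/* End of a user-space mapping's lifetime (munmap): flush the CPU cache for
 * cached buffers and forget the vma. vm_private_data may already be NULL if
 * the buffer was freed before the mapping was torn down. */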
static void dmabuf_mmap_close(struct vm_area_struct *vma)
{
	struct buffer *buffer = vma->vm_private_data;
	struct buffer_data *data;

	/* The buffer may already have been freed via dmabuf_heap_free() */
	if (!buffer)
		return;
	data = buffer->priv;

	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
		 __func__, __LINE__, buffer->id, buffer, vma);

	if (!(data->mattr & IMG_MEM_ATTR_UNCACHED)) {
		/* User may have written to the buffer so flush D-cache */
		dma_buf_end_cpu_access(data->dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
				       0 /* start */,
				       buffer->actual_size,
#endif
				       DMA_TO_DEVICE);
	}
	data->mapped_vma = NULL;
}

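/* Page-fault handler for user-space mappings. The mapping is populated
 * lazily: on each fault the faulting offset is located in the buffer's
 * sg list and the corresponding physical page is inserted into the vma
 * as a mixed-map entry. */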
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
static vm_fault_t dmabuf_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
#else
static int dmabuf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
	struct buffer *buffer = vma->vm_private_data;
	struct buffer_data *data = buffer->priv;
	struct sg_table *sgt = data->sgt;
	struct scatterlist *sgl;
	pgoff_t curr_offset;
	dma_addr_t phys = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
	unsigned long addr = vmf->address;
#else
	unsigned long addr = (unsigned long)vmf->virtual_address;
#endif

	if (trace_mmap_fault) {
		pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
			 __func__, __LINE__, buffer->id, buffer, vma);
		pr_debug("%s:%d vm_start %#lx vm_end %#lx total size %ld\n",
			 __func__, __LINE__,
			 vma->vm_start,
			 vma->vm_end,
			 vma->vm_end - vma->vm_start);
	}

	/* Walk the sg list until the entry containing the faulting offset */
	curr_offset = addr - vma->vm_start;
	sgl = sgt->sgl;
	while (sgl) {
		phys = sg_phys(sgl);
		if (curr_offset < sgl->length)
			break;
		curr_offset -= sgl->length;
		sgl = sg_next(sgl);
	}
	phys += curr_offset; /* physical address of the fault inside the current block */

	if (trace_mmap_fault)
		pr_info("%s:%d vmf pgoff:%#lx vmf addr:%lx phys:%#llx\n",
			__func__, __LINE__, vmf->pgoff, addr,
			(unsigned long long)phys);

	{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
		unsigned long pfn = PHYS_PFN(phys);
#else
		pfn_t pfn = {
			.val = PHYS_PFN(phys)
		};
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
		return vmf_insert_mixed(vma, addr, pfn);
#else
		{
			int err = vm_insert_mixed(vma, addr, pfn);

			switch (err) {
			case 0:
			case -EAGAIN:
			case -ERESTARTSYS:
			case -EINTR:
			case -EBUSY:
				return VM_FAULT_NOPAGE;
			case -ENOMEM:
				return VM_FAULT_OOM;
			}
			return VM_FAULT_SIGBUS;
		}
#endif
	}
}

/* The vma ops->fault handler is used to track user space mappings
 * (inspired by other gpu/drm drivers from the kernel source tree)
 * so that dma_sync_* ops can be called properly when the mapping is
 * destroyed (when the user calls the munmap syscall).
 * The vma flags are used to choose the correct dma mapping direction.
 * By default the DMA_BIDIRECTIONAL mapping type is used (kernel space only).
 * The above allows automatic cache flushing/invalidation.
 *
 * Examples:
 *   mmap() -> .open -> invalidate buffer cache
 *   .. read content from buffer
 *   unmap() -> .close -> do nothing
 *
 *   mmap() -> .open -> do nothing
 *   .. write content to buffer
 *   unmap() -> .close -> flush buffer cache
 */
static struct vm_operations_struct dmabuf_heap_mmap_vm_ops = {
	.open = dmabuf_mmap_open,
	.close = dmabuf_mmap_close,
	.fault = dmabuf_mmap_fault,
};

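/* Set up a user-space mapping of the buffer. No pages are inserted here;
 * the vma is configured as VM_MIXEDMAP with the fault handler above, and
 * dmabuf_mmap_open() is called to perform the initial cache maintenance. */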
static int dmabuf_heap_map_um(struct heap *heap, struct buffer *buffer,
			      struct vm_area_struct *vma)
{
	struct buffer_data *data = buffer->priv;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);
	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %ld\n",
		 __func__, __LINE__,
		 vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);

	/* CACHED by default */
	if (data->mattr & IMG_MEM_ATTR_WRITECOMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (data->mattr & IMG_MEM_ATTR_UNCACHED)
		WARN_ONCE(1, "Uncached not allowed");
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */

	vma->vm_ops = &dmabuf_heap_mmap_vm_ops;
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_private_data = buffer;
	vma->vm_pgoff = 0;

	dmabuf_mmap_open(vma);
	return 0;
}

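/* Map the buffer into the kernel with dma_buf_vmap(), bracketed by
 * begin_cpu_access so that the CPU view is coherent while the kernel
 * mapping exists. Calling it again for an already mapped buffer only
 * logs a warning. */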
static int dmabuf_heap_map_km(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *data = buffer->priv;
	struct dma_buf *dma_buf = data->dma_buf;
	int ret;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);

	if (buffer->kptr) {
		pr_warn("%s called for already mapped buffer %d\n",
			__func__, buffer->id);
		return 0;
	}

	ret = dma_buf_begin_cpu_access(dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
				       0 /* start */,
				       buffer->actual_size,
#endif
				       DMA_BIDIRECTIONAL);
	if (ret) {
		pr_err("%s begin_cpu_access failed for bufid %d\n",
		       __func__, buffer->id);
		return ret;
	}

	buffer->kptr = dma_buf_vmap(dma_buf);
	if (!buffer->kptr) {
		pr_err("%s dma_buf_vmap failed!\n", __func__);
		/* Balance the successful begin_cpu_access above */
		dma_buf_end_cpu_access(dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
				       0 /* start */,
				       buffer->actual_size,
#endif
				       DMA_BIDIRECTIONAL);
		return -EFAULT;
	}

	pr_debug("%s:%d buffer %d vmap to 0x%p\n", __func__, __LINE__,
		 buffer->id, buffer->kptr);
	return 0;
}

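/* Undo dmabuf_heap_map_km(): end the CPU access window and drop the kernel
 * mapping. Calling it for an unmapped buffer only logs a warning. */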
static int dmabuf_heap_unmap_km(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *data = buffer->priv;
	struct dma_buf *dma_buf = data->dma_buf;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);

	if (!buffer->kptr) {
		pr_warn("%s called for unmapped buffer %d\n",
			__func__, buffer->id);
		return 0;
	}

	dma_buf_end_cpu_access(dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
			       0 /* start */,
			       buffer->actual_size,
#endif
			       DMA_BIDIRECTIONAL);
	dma_buf_vunmap(dma_buf, buffer->kptr);

	pr_debug("%s:%d buffer %d kunmap from 0x%p\n", __func__, __LINE__,
		 buffer->id, buffer->kptr);
	buffer->kptr = NULL;
	return 0;
}

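/* Hand the imported sg_table back to the caller; whether the caller should
 * use the DMA-mapped addresses is taken from the heap options. */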
static int dmabuf_get_sg_table(struct heap *heap, struct buffer *buffer,
			       struct sg_table **sg_table, bool *use_sg_dma)
{
	struct buffer_data *data = buffer->priv;

	*sg_table = data->sgt;
	*use_sg_dma = heap->options.dmabuf.use_sg_dma;
	return 0;
}

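/* Explicit cache maintenance entry points: sync_cpu_to_dev flushes CPU
 * writes towards the device, sync_dev_to_cpu invalidates so the CPU sees
 * data written by the device. Both are no-ops for uncached buffers. */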
static void dmabuf_sync_cpu_to_dev(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *data = buffer->priv;
	struct dma_buf *dma_buf = data->dma_buf;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);

	if (!(data->mattr & IMG_MEM_ATTR_UNCACHED)) {
		dma_buf_end_cpu_access(dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
				       0 /* start */,
				       buffer->actual_size,
#endif
				       DMA_TO_DEVICE);
	}
}

static void dmabuf_sync_dev_to_cpu(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *data = buffer->priv;
	struct dma_buf *dma_buf = data->dma_buf;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);

	if (!(data->mattr & IMG_MEM_ATTR_UNCACHED)) {
		dma_buf_begin_cpu_access(dma_buf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
					 0 /* start */,
					 buffer->actual_size,
#endif
					 DMA_FROM_DEVICE);
	}
}

static void dmabuf_heap_destroy(struct heap *heap)
{
	pr_debug("%s:%d\n", __func__, __LINE__);
}

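/* This heap only imports external dma-buf buffers, so alloc, unmap_um,
 * get_page_array and set_offset are left unimplemented. */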
static struct heap_ops dmabuf_heap_ops = {
	.alloc = NULL,
	.import = dmabuf_heap_import,
	.free = dmabuf_heap_free,
	.map_um = dmabuf_heap_map_um,
	.unmap_um = NULL,
	.map_km = dmabuf_heap_map_km,
	.unmap_km = dmabuf_heap_unmap_km,
	.get_sg_table = dmabuf_get_sg_table,
	.get_page_array = NULL,
	.sync_cpu_to_dev = dmabuf_sync_cpu_to_dev,
	.sync_dev_to_cpu = dmabuf_sync_dev_to_cpu,
	.set_offset = NULL,
	.destroy = dmabuf_heap_destroy,
};

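/* Heap initialisation entry point: nothing to allocate up front, just
 * install the ops table above. */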
int img_mem_dmabuf_init(const struct heap_config *heap_cfg, struct heap *heap)
{
	pr_debug("%s:%d\n", __func__, __LINE__);

	heap->ops = &dmabuf_heap_ops;
	return 0;
}

/*
 * coding style for emacs
 *
 * Local variables:
 * indent-tabs-mode: t
 * tab-width: 8
 * c-basic-offset: 8
 * End:
 */