de_heap_noncoherent.c

/*
 * de_heap_noncoherent.c
 */

#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
#include <asm/cacheflush.h>
#else
#include <asm/set_memory.h>
#endif
#endif /* CONFIG_X86 */

#include "de_heap.h"

#define MEMORY_ALLOCATION_FLAGS (GFP_DMA32 | __GFP_ZERO)
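/*
 * Cache attribute applied to the allocated pages and used for both the
 * kernel (vmap) and user (mmap) mappings; selectable at module load time.
 */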
enum mem_cache_type {
	MEM_TYPE_CACHED = 1,
	MEM_TYPE_UNCACHED = 2,
	MEM_TYPE_WRITECOMBINE = 3,
};

static unsigned int cache_type = MEM_TYPE_WRITECOMBINE;
module_param(cache_type, uint, 0444);
MODULE_PARM_DESC(cache_type,
		"Memory cache type: 1-cached, 2-uncached, 3-writecombine");
struct buffer {
	size_t size;
	void *vaddr;
	struct sg_table *sg_table;
	enum dma_data_direction dma_dir;
	struct device *client;
	int fd; /* Just for tracking */
};
/*
 * dmabuf ops
 */
static void de_noncoherent_kunmap(struct dma_buf *buf, unsigned long page,
				void *vaddr);
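/*
 * Release callback: drops any leftover kernel mapping, frees every page
 * referenced by the sg_table (restoring the write-back attribute first on
 * x86), then frees the sg_table and the buffer bookkeeping itself.
 */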
static void de_noncoherent_release(struct dma_buf *buf)
{
	struct buffer *buffer = buf->priv;
	struct scatterlist *sgl;

	pr_info("%s fd:%d\n", __func__, buffer->fd);

	if (unlikely(buffer->vaddr))
		de_noncoherent_kunmap(buf, 0, buffer->vaddr);

	sgl = buffer->sg_table->sgl;
	while (sgl) {
		struct page *page = sg_page(sgl);
		if (page) {
#ifdef CONFIG_X86
			set_memory_wb((unsigned long)page_address(page), 1);
#endif
			__free_page(page);
		}
		sgl = sg_next(sgl);
	}
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	kfree(buffer);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
static void *de_noncoherent_kmap_atomic(struct dma_buf *buf, unsigned long page)
{
	pr_debug("%s\n", __func__);

	return NULL;
}
#endif

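/*
 * map_dma_buf callback: the backing pages already exist, so this only
 * verifies that each page can be DMA-mapped for the attaching device
 * (mapping and immediately unmapping it) and, on x86, applies the page
 * attribute selected by the cache_type module parameter. Only one attached
 * client is supported at a time; the pre-built sg_table is returned
 * unmapped and the actual dma_map_sg() is deferred to begin_cpu_access().
 */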
static struct sg_table *de_noncoherent_map_dma(struct dma_buf_attachment *attach,
				enum dma_data_direction dir)
{
	struct buffer *buffer = attach->dmabuf->priv;
	struct scatterlist *sgl = buffer->sg_table->sgl;

	pr_info("%s\n", __func__);

	if (buffer->client) {
		pr_err("%s client already attached!\n", __func__);
		return NULL;
	}

	/* We are only checking if the buffer is mappable */
	while (sgl) {
		struct page *page = sg_page(sgl);
		dma_addr_t dma_addr;

		pr_debug("%s:%d phys %#llx length %d\n",
			__func__, __LINE__,
			(unsigned long long)sg_phys(sgl), sgl->length);

		if (!page)
			WARN_ONCE(1, "Page does not exist!");

		dma_addr = dma_map_page(attach->dev, page, 0, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
		if (dma_mapping_error(attach->dev, dma_addr)) {
			pr_err("%s dma_map_page failed!\n", __func__);
			return NULL;
		}
		dma_unmap_page(attach->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

#ifdef CONFIG_X86
		if (cache_type == MEM_TYPE_CACHED)
			set_memory_wb((unsigned long)page_address(page), 1);
		else if (cache_type == MEM_TYPE_WRITECOMBINE)
			set_memory_wc((unsigned long)page_address(page), 1);
		else if (cache_type == MEM_TYPE_UNCACHED)
			set_memory_uc((unsigned long)page_address(page), 1);
#endif
		sgl = sg_next(sgl);
	}

	buffer->client = attach->dev;
	return buffer->sg_table;
}

static void de_noncoherent_unmap_dma(struct dma_buf_attachment *attach,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	struct buffer *buffer = attach->dmabuf->priv;

	pr_info("%s\n", __func__);

	buffer->client = NULL;
}

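/*
 * Start of CPU access: the sg list is DMA-mapped for the attached device
 * lazily on the first call and the direction is remembered in the buffer.
 * For DMA_FROM_DEVICE mappings the sg list is also synced for the CPU
 * before the CPU reads the data.
 */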
static int de_noncoherent_begin_cpu_access(struct dma_buf *dmabuf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
				size_t start, size_t len,
#endif
				enum dma_data_direction direction)
{
	struct buffer *buffer = dmabuf->priv;
	struct sg_table *sgt = buffer->sg_table;
	int ret;

	pr_info("%s\n", __func__);

	if (!buffer->client) {
		pr_err("%s client is NULL\n", __func__);
		return -EFAULT;
	}

	if (buffer->dma_dir == DMA_NONE) {
		ret = dma_map_sg(buffer->client, sgt->sgl, sgt->orig_nents,
				direction);
		if (ret <= 0) {
			pr_err("%s dma_map_sg failed!\n", __func__);
			return -EFAULT;
		}
		sgt->nents = ret;
		buffer->dma_dir = direction;
	}

	if (buffer->dma_dir == DMA_FROM_DEVICE)
		dma_sync_sg_for_cpu(buffer->client, sgt->sgl, sgt->orig_nents,
				DMA_FROM_DEVICE);

	return 0;
}

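/*
 * End of CPU access: for DMA_TO_DEVICE mappings the sg list is synced for
 * the CPU first, then it is unmapped and the recorded direction reset to
 * DMA_NONE.
 */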
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
static void de_noncoherent_end_cpu_access(struct dma_buf *dmabuf,
				size_t start, size_t len,
				enum dma_data_direction direction)
#else
static int de_noncoherent_end_cpu_access(struct dma_buf *dmabuf,
				enum dma_data_direction direction)
#endif
{
	struct buffer *buffer = dmabuf->priv;
	struct sg_table *sgt = buffer->sg_table;

	pr_info("%s\n", __func__);

	if (!buffer->client) {
		pr_err("%s client is NULL\n", __func__);
		goto exit;
	}

	if (buffer->dma_dir == DMA_NONE)
		goto exit;

	if (buffer->dma_dir == DMA_TO_DEVICE)
		dma_sync_sg_for_cpu(buffer->client, sgt->sgl, sgt->orig_nents,
				DMA_TO_DEVICE);

	dma_unmap_sg(buffer->client, sgt->sgl,
			sgt->orig_nents, buffer->dma_dir);
	buffer->dma_dir = DMA_NONE;

exit:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
	return 0;
#else
	return;
#endif
}

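/*
 * mmap callback: maps each scatterlist entry into user space with
 * remap_pfn_range(), using a write-combined or uncached vm_page_prot when
 * the corresponding cache_type is selected.
 */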
static int de_noncoherent_mmap(struct dma_buf *dmabuf,
				struct vm_area_struct *vma)
{
	struct buffer *buffer = dmabuf->priv;
	struct scatterlist *sgl = buffer->sg_table->sgl;
	unsigned long addr;

	pr_debug("%s\n", __func__);

	/* pgprot_t is cached by default */
	if (cache_type == MEM_TYPE_WRITECOMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else if (cache_type == MEM_TYPE_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	addr = vma->vm_start;
	while (sgl) {
		dma_addr_t phys = sg_phys(sgl); /* sg_dma_address ? */
		unsigned long pfn = phys >> PAGE_SHIFT;
		unsigned int len = sgl->length;
		int ret;

		ret = remap_pfn_range(vma, addr, pfn, len, vma->vm_page_prot);
		if (ret)
			return ret;

		addr += len;
		sgl = sg_next(sgl);
	}

	return 0;
}

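/*
 * Kernel mapping: the whole buffer is vmap()ed on first use with a
 * protection derived from cache_type and the result is cached in
 * buffer->vaddr; the page index argument is ignored.
 */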
static void *de_noncoherent_kmap(struct dma_buf *dma_buf, unsigned long page)
{
	struct buffer *buffer = dma_buf->priv;
	struct scatterlist *sgl = buffer->sg_table->sgl;
	unsigned int num_pages = sg_nents(sgl);
	struct page **pages;
	pgprot_t prot;
	int i;

	pr_debug("%s\n", __func__);

	/* NOTE: Ignoring the page parameter, we have the info in the sg_table */
	if (buffer->vaddr)
		return buffer->vaddr;

	pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err("%s failed to allocate memory for pages\n", __func__);
		return NULL;
	}

	prot = PAGE_KERNEL;
	/* CACHED by default */
	if (cache_type == MEM_TYPE_WRITECOMBINE)
		prot = pgprot_writecombine(prot);
	else if (cache_type == MEM_TYPE_UNCACHED)
		prot = pgprot_noncached(prot);

	i = 0;
	while (sgl) {
		pages[i++] = sg_page(sgl);
		sgl = sg_next(sgl);
	}

	buffer->vaddr = vmap(pages, num_pages, VM_MAP, prot);
	kfree(pages);

	return buffer->vaddr;
}

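/*
 * Drops the cached vmap() mapping; a call with a mismatched address is
 * only warned about and otherwise ignored.
 */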
static void de_noncoherent_kunmap(struct dma_buf *buf, unsigned long page,
				void *vaddr)
{
	struct buffer *buffer = buf->priv;

	pr_debug("%s\n", __func__);

	if (buffer->vaddr != vaddr || !buffer->vaddr) {
		pr_warn("%s called with wrong address %p != %p\n",
			__func__, vaddr, buffer->vaddr);
		return;
	}

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

static void *de_noncoherent_vmap(struct dma_buf *buf)
{
	return de_noncoherent_kmap(buf, 0);
}

static void de_noncoherent_vunmap(struct dma_buf *buf, void *kptr)
{
	de_noncoherent_kunmap(buf, 0, kptr);
}

static const struct dma_buf_ops dmabuf_ops = {
	.attach = NULL, /* optional */
	.detach = NULL, /* optional */
	.map_dma_buf = de_noncoherent_map_dma,
	.unmap_dma_buf = de_noncoherent_unmap_dma,
	.release = de_noncoherent_release,
	.begin_cpu_access = de_noncoherent_begin_cpu_access,
	.end_cpu_access = de_noncoherent_end_cpu_access,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
	.kmap_atomic = de_noncoherent_kmap_atomic,
	.kunmap_atomic = NULL, /* optional */
	.kmap = de_noncoherent_kmap,
	.kunmap = de_noncoherent_kunmap, /* optional */
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
	.map_atomic = de_noncoherent_kmap_atomic,
	.unmap_atomic = NULL, /* optional */
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
	.map = de_noncoherent_kmap,
	.unmap = de_noncoherent_kunmap, /* optional */
#endif
#endif
	.mmap = de_noncoherent_mmap,
	.vmap = de_noncoherent_vmap,
	.vunmap = de_noncoherent_vunmap,
};

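/*
 * Heap entry points (see de_heap.h): de_heap_buffer_create() allocates the
 * backing memory page by page (GFP_DMA32, zeroed) into an sg_table and
 * exports it as a dma-buf; the dma_buf pointer is handed back through
 * private_data.
 */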
int de_heap_buffer_create(size_t size, unsigned long align, void **private_data)
{
	struct buffer *buffer;
	struct dma_buf *dma_buf;
	struct scatterlist *sgl;
	int ret;
	int pages;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
#endif

	pr_info("%s:noncoherent size %zu\n", __func__, size);

	buffer = kzalloc(sizeof(struct buffer), GFP_KERNEL);
	if (!buffer) {
		pr_err("%s:noncoherent failed to allocate buffer\n", __func__);
		return -ENOMEM;
	}
	buffer->size = size;

	buffer->sg_table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buffer->sg_table) {
		pr_err("%s:noncoherent failed to allocate sg_table\n", __func__);
		ret = -ENOMEM;
		goto sg_table_malloc_failed;
	}

	pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	ret = sg_alloc_table(buffer->sg_table, pages, GFP_KERNEL);
	if (ret) {
		pr_err("%s:noncoherent sg_alloc_table failed\n", __func__);
		goto sg_alloc_table_failed;
	}

	sgl = buffer->sg_table->sgl;
	while (sgl) {
		struct page *page;

		page = alloc_page(MEMORY_ALLOCATION_FLAGS);
		if (!page) {
			pr_err("%s alloc_page failed!\n", __func__);
			ret = -ENOMEM;
			goto alloc_page_failed;
		}
		sg_set_page(sgl, page, PAGE_SIZE, 0);
		sgl = sg_next(sgl);
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR, NULL);
#else
	exp_info.ops = &dmabuf_ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;
	exp_info.resv = NULL;
	dma_buf = dma_buf_export(&exp_info);
#endif
	if (IS_ERR(dma_buf)) {
		pr_err("%s:noncoherent dma_buf_export failed\n", __func__);
		ret = PTR_ERR(dma_buf);
		goto dma_buf_export_failed;
	}
	buffer->dma_dir = DMA_NONE;
	dma_buf->priv = buffer;
	*private_data = dma_buf;

	pr_info("%s:noncoherent size %zu\n",
		__func__, size);
	return 0;

dma_buf_export_failed:
alloc_page_failed:
	sgl = buffer->sg_table->sgl;
	while (sgl) {
		struct page *page = sg_page(sgl);
		if (page) {
#ifdef CONFIG_X86
			set_memory_wb((unsigned long)page_address(page), 1);
#endif
			__free_page(page);
		}
		sgl = sg_next(sgl);
	}
	sg_free_table(buffer->sg_table);
sg_alloc_table_failed:
	kfree(buffer->sg_table);
sg_table_malloc_failed:
	kfree(buffer);
	return ret;
}

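/*
 * Installs a file descriptor for the exported dma-buf; the reference taken
 * here is dropped again if dma_buf_fd() fails. The fd is remembered in the
 * buffer for log messages only.
 */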
int de_heap_export_fd(void *private_data, unsigned long flags)
{
	struct dma_buf *dma_buf = private_data;
	struct buffer *buffer = dma_buf->priv;
	int ret;

	pr_debug("%s:noncoherent %p\n", __func__, dma_buf);

	get_dma_buf(dma_buf);
	buffer->fd = ret = dma_buf_fd(dma_buf, flags);
	if (ret < 0) {
		pr_err("%s:noncoherent dma_buf_fd failed\n", __func__);
		dma_buf_put(dma_buf);
		return ret;
	}

	pr_info("%s:noncoherent export fd %d\n",
		__func__, ret);
	return ret;
}

void de_heap_buffer_free(void *private_data)
{
	struct dma_buf *dma_buf = private_data;
	struct buffer *buffer = dma_buf->priv;

	pr_info("%s:noncoherent fd:%d\n", __func__, buffer->fd);

	dma_buf_put(dma_buf);
}

int de_heap_heap_init(void)
{
	pr_info("%s:noncoherent cache_type:%d\n", __func__, cache_type);

	return 0;
}

void de_heap_heap_deinit(void)
{
	pr_info("%s:noncoherent\n", __func__);
}

/*
 * coding style for emacs
 *
 * Local variables:
 * indent-tabs-mode: t
 * tab-width: 8
 * c-basic-offset: 8
 * End:
 */