/*
 * de_heap_coherent.c
 */
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_X86
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
#include <asm/cacheflush.h>
#else
#include <asm/set_memory.h>
#endif
#endif /* CONFIG_X86 */

#include "de_heap.h"

#define MEMORY_ALLOCATION_FLAGS (GFP_HIGHUSER | __GFP_ZERO)

struct buffer {
	size_t size;
	void *vaddr;
	struct sg_table *sg_table;
	dma_addr_t handle;
};
/*
 * dmabuf ops
 */

static void de_coherent_release(struct dma_buf *buf)
{
	struct buffer *buffer = buf->priv;

	pr_info("%s phys address 0x%llx\n",
		__func__, (unsigned long long int)buffer->handle);

	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
#ifdef CONFIG_X86
	set_memory_wb((unsigned long)buffer->vaddr,
		      (buffer->size + PAGE_SIZE - 1) / PAGE_SIZE);
#endif
	dma_free_coherent(NULL, buffer->size, buffer->vaddr, buffer->handle);
	kfree(buffer);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
static void *de_coherent_kmap_atomic(struct dma_buf *buf, unsigned long page)
{
	pr_debug("%s\n", __func__);

	return NULL;
}
#endif
static struct sg_table *de_coherent_map_dma(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct buffer *buffer = attach->dmabuf->priv;

	pr_debug("%s\n", __func__);

	return buffer->sg_table;
}

static void de_coherent_unmap_dma(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	pr_debug("%s\n", __func__);
}
static int de_coherent_mmap(struct dma_buf *dmabuf,
			    struct vm_area_struct *vma)
{
	struct buffer *buffer = dmabuf->priv;
	unsigned long user_count, count, pfn, off;

	/*
	 * We could use dma_mmap_coherent() here, but it hard-codes an
	 * uncached mapping, and on x86 the kernel complains about a
	 * double mapping with conflicting semantics (write-combine vs
	 * uncached). Instead, re-implement the mapping here; code
	 * adapted from dma_common_mmap().
	 */
	pr_debug("%s\n", __func__);

	user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	count = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	pfn = page_to_pfn(virt_to_page(buffer->vaddr));
	off = vma->vm_pgoff;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       user_count << PAGE_SHIFT,
			       vma->vm_page_prot);
}
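
/*
 * For context, a userspace-side sketch of how the mmap op above is
 * reached: once the buffer has been exported as a dma-buf fd, a plain
 * mmap() on that fd lands here. "fd" and "size" are assumed to come
 * from the exporting ioctl; this is illustrative, not part of the
 * driver:
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	if (ptr == MAP_FAILED)
 *		... handle the error ...;
 */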
static void *de_coherent_kmap(struct dma_buf *dma_buf, unsigned long page)
{
	struct buffer *buffer = dma_buf->priv;

	pr_debug("%s\n", __func__);

	/* kernel memory mapping has been done at allocation time */
	return buffer->vaddr;
}

static void de_coherent_kunmap(struct dma_buf *buf, unsigned long page,
			       void *vaddr)
{
	pr_debug("%s\n", __func__);
}

static void *de_coherent_vmap(struct dma_buf *buf)
{
	return de_coherent_kmap(buf, 0);
}

static void de_coherent_vunmap(struct dma_buf *buf, void *kptr)
{
	de_coherent_kunmap(buf, 0, kptr);
}
static const struct dma_buf_ops dmabuf_ops = {
	.attach = NULL, /* optional */
	.detach = NULL, /* optional */
	.map_dma_buf = de_coherent_map_dma,
	.unmap_dma_buf = de_coherent_unmap_dma,
	.release = de_coherent_release,
	.begin_cpu_access = NULL, /* optional */
	.end_cpu_access = NULL, /* optional */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
	.kmap_atomic = de_coherent_kmap_atomic,
	.kunmap_atomic = NULL, /* optional */
	.kmap = de_coherent_kmap,
	.kunmap = de_coherent_kunmap, /* optional */
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
	.map_atomic = de_coherent_kmap_atomic,
	.unmap_atomic = NULL, /* optional */
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
	.map = de_coherent_kmap,
	.unmap = de_coherent_kunmap, /* optional */
#endif
#endif
	.mmap = de_coherent_mmap,
	.vmap = de_coherent_vmap,
	.vunmap = de_coherent_vunmap,
};
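
/*
 * For reference, the importer-side flow that exercises the
 * map_dma_buf/unmap_dma_buf ops above is the standard dma-buf attach
 * sequence. A sketch, where "importer_dev" is a hypothetical
 * struct device belonging to the importing driver:
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dma_buf, importer_dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	... use sgt for device DMA ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dma_buf, attach);
 */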
int de_heap_buffer_create(size_t size, unsigned long align, void **private_data)
{
	struct buffer *buffer;
	struct dma_buf *dma_buf;
	int ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
#endif

	pr_info("%s:coherent size %zu\n", __func__, size);

	buffer = kzalloc(sizeof(struct buffer), GFP_KERNEL);
	if (!buffer) {
		pr_err("%s:coherent failed to allocate buffer\n", __func__);
		return -ENOMEM;
	}
	buffer->size = size;

	buffer->vaddr = dma_alloc_coherent(NULL, size, &buffer->handle,
					   MEMORY_ALLOCATION_FLAGS);
	if (!buffer->vaddr) {
		pr_err("%s:coherent dma_alloc_coherent failed for size %zu\n",
		       __func__, size);
		ret = -ENOMEM;
		goto dma_alloc_coherent_failed;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)buffer->vaddr,
		      (buffer->size + PAGE_SIZE - 1) / PAGE_SIZE);
#endif

	buffer->sg_table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buffer->sg_table) {
		pr_err("%s:coherent failed to allocate sg_table\n", __func__);
		ret = -ENOMEM;
		goto sg_table_malloc_failed;
	}

	ret = sg_alloc_table(buffer->sg_table, 1, GFP_KERNEL);
	if (ret) {
		pr_err("%s:coherent sg_alloc_table failed\n", __func__);
		goto sg_alloc_table_failed;
	}
	sg_set_page(buffer->sg_table->sgl, virt_to_page(buffer->vaddr),
		    PAGE_ALIGN(size), 0);

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)
	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR, NULL);
#else
	exp_info.ops = &dmabuf_ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;
	exp_info.resv = NULL;
	dma_buf = dma_buf_export(&exp_info);
#endif
	if (IS_ERR(dma_buf)) {
		pr_err("%s:coherent dma_buf_export failed\n", __func__);
		ret = PTR_ERR(dma_buf);
		goto dma_buf_export_failed;
	}
	dma_buf->priv = buffer;
	*private_data = dma_buf;

	pr_info("%s:coherent phys address 0x%llx virtual addr %p size %zu\n",
		__func__, (unsigned long long int)buffer->handle,
		buffer->vaddr, size);
	return 0;

dma_buf_export_failed:
	sg_free_table(buffer->sg_table);
sg_alloc_table_failed:
	kfree(buffer->sg_table);
sg_table_malloc_failed:
#ifdef CONFIG_X86
	set_memory_wb((unsigned long)buffer->vaddr,
		      (buffer->size + PAGE_SIZE - 1) / PAGE_SIZE);
#endif
	dma_free_coherent(NULL, size, buffer->vaddr, buffer->handle);
dma_alloc_coherent_failed:
	kfree(buffer);
	return ret;
}
int de_heap_export_fd(void *private_data, unsigned long flags)
{
	struct dma_buf *dma_buf = private_data;
	struct buffer *buffer = dma_buf->priv;
	int ret;

	pr_debug("%s:coherent %p\n", __func__, dma_buf);

	get_dma_buf(dma_buf);
	ret = dma_buf_fd(dma_buf, flags);
	if (ret < 0) {
		pr_err("%s:coherent dma_buf_fd failed\n", __func__);
		dma_buf_put(dma_buf);
		return ret;
	}

	pr_info("%s:coherent phys address 0x%llx export fd %d\n",
		__func__, (unsigned long long int)buffer->handle, ret);
	return ret;
}
void de_heap_buffer_free(void *private_data)
{
	struct dma_buf *dma_buf = private_data;
	struct buffer *buffer = dma_buf->priv;

	pr_info("%s:coherent phys address 0x%llx\n",
		__func__, (unsigned long long int)buffer->handle);

	dma_buf_put(dma_buf);
}

int de_heap_heap_init(void)
{
	pr_info("%s:coherent\n", __func__);
	return 0;
}

void de_heap_heap_deinit(void)
{
	pr_info("%s:coherent\n", __func__);
}
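
/*
 * Typical call sequence for this heap (a sketch; error handling is
 * abbreviated and the caller context is hypothetical): create the
 * buffer, hand the resulting fd to userspace, then drop the creation
 * reference once the heap-side handle is no longer needed. The fd
 * keeps its own reference via get_dma_buf() in de_heap_export_fd().
 *
 *	void *buf_priv;
 *	int ret, fd;
 *
 *	ret = de_heap_buffer_create(size, PAGE_SIZE, &buf_priv);
 *	if (ret)
 *		return ret;
 *	fd = de_heap_export_fd(buf_priv, O_CLOEXEC);
 *	if (fd < 0) {
 *		de_heap_buffer_free(buf_priv);
 *		return fd;
 *	}
 *	... hand fd to userspace ...
 *	de_heap_buffer_free(buf_priv);
 */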
/*
 * coding style for emacs
 *
 * Local variables:
 * indent-tabs-mode: t
 * tab-width: 8
 * c-basic-offset: 8
 * End:
 */