de_heap_carveout.c

/*
 * de_heap_carveout.c
 */

#include <linux/version.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "de_heap.h"

/*
 * module parameters
 */

static unsigned int use_pci = 0;
module_param(use_pci, uint, 0444);
MODULE_PARM_DESC(use_pci, "use PCI bar memory (default: false)");

static unsigned int cpu_map = 1;
module_param(cpu_map, uint, 0444);
MODULE_PARM_DESC(cpu_map, "map memory to CPU (default: true)");

/* mandatory carveout parameters (use_pci = 0) */

static unsigned long carveout_base = 0;
module_param(carveout_base, ulong, 0444);
MODULE_PARM_DESC(carveout_base, "physical base address. "
		 "mandatory when use_pci is false");

static unsigned long carveout_size = 0;
module_param(carveout_size, ulong, 0444);
MODULE_PARM_DESC(carveout_size, "physical size in bytes. "
		 "mandatory when use_pci is false");

/* mandatory pci parameters (use_pci = 1) */

static unsigned int pci_vendor = 0;
module_param(pci_vendor, uint, 0444);
MODULE_PARM_DESC(pci_vendor, "PCI vendor id. mandatory when use_pci is true");

static unsigned int pci_product = 0;
module_param(pci_product, uint, 0444);
MODULE_PARM_DESC(pci_product, "PCI product id. mandatory when use_pci is true");

static int pci_bar = -1;
module_param(pci_bar, int, 0444);
MODULE_PARM_DESC(pci_bar, "PCI bar index. mandatory when use_pci is true");

/* optional pci parameters (use_pci = 1) */

static unsigned long pci_size = 0;
module_param(pci_size, ulong, 0444);
MODULE_PARM_DESC(pci_size, "physical size in bytes. "
		 "used when use_pci is true. "
		 "when 0 (the default), use all memory in the PCI bar");

static unsigned long pci_offset = 0;
module_param(pci_offset, ulong, 0444);
MODULE_PARM_DESC(pci_offset, "offset from PCI bar start. "
		 "used when use_pci is true. optional (default: 0)");

static bool use_sg_dma = true;
module_param(use_sg_dma, bool, 0444);
MODULE_PARM_DESC(use_sg_dma, "Sets sg_dma_address/len info");
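/*
 * Illustrative module load lines (the addresses and PCI IDs below are
 * placeholders, not values from any real platform):
 *
 *   # static carveout: 16 MiB at physical address 0x20000000
 *   insmod de_heap_carveout.ko carveout_base=0x20000000 carveout_size=0x1000000
 *
 *   # PCI mode: serve allocations out of BAR 2 of device 1234:5678
 *   insmod de_heap_carveout.ko use_pci=1 pci_vendor=0x1234 pci_product=0x5678 pci_bar=2
 */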
/*
 * internal values
 */

static phys_addr_t pool_base;

/* minimum allocation granularity: order 12, i.e. 4096 bytes (one page) */
#define POOL_ALLOC_ORDER 12

static struct gen_pool *heap_pool;

static struct pci_dev *pci_device;

struct buffer {
	phys_addr_t phys;
	size_t size;
	struct sg_table *sg_table;
	struct dma_buf *dma_buf;
	dma_addr_t dma_base;
	unsigned int dma_size;
};
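/*
 * One struct buffer exists per exported dma-buf: phys/size describe the
 * region taken from the gen_pool, sg_table holds a single-entry table
 * covering that contiguous region, and dma_base/dma_size are the device
 * view of it (offset within the PCI BAR when use_pci is set, the raw
 * physical address otherwise).
 */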
/*
 * dmabuf ops
 */

static struct sg_table *de_carveout_map_dma(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct buffer *buffer = attach->dmabuf->priv;

	pr_debug("%s\n", __func__);

	if (use_sg_dma) {
		sg_dma_address(buffer->sg_table->sgl) = buffer->dma_base;
		sg_dma_len(buffer->sg_table->sgl) = buffer->dma_size;
	}

	return buffer->sg_table;
}

static void de_carveout_unmap_dma(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	struct buffer *buffer = attach->dmabuf->priv;

	pr_debug("%s\n", __func__);

	if (use_sg_dma) {
		sg_dma_address(buffer->sg_table->sgl) = (~(dma_addr_t)0);
		sg_dma_len(buffer->sg_table->sgl) = 0;
	}
}

static void de_carveout_release(struct dma_buf *buf)
{
	struct buffer *buffer = buf->priv;

	pr_info("%s phys address 0x%llx size %zu\n",
		__func__, (unsigned long long)buffer->phys, buffer->size);

	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	gen_pool_free(heap_pool, buffer->phys, buffer->size);
	kfree(buffer);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
static void *de_carveout_kmap_atomic(struct dma_buf *buf, unsigned long page)
{
	pr_err("%s not supported\n", __func__);
	return NULL;
}
#endif

static int de_carveout_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct buffer *buffer = dmabuf->priv;

	pr_debug("%s\n", __func__);

	if (!cpu_map) {
		pr_err("%s not allowed (cpu_map is false)\n", __func__);
		return -EIO;
	}

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(sg_page(buffer->sg_table->sgl)),
			       buffer->sg_table->sgl->length,
			       vma->vm_page_prot);
}
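/*
 * Userspace side, sketched under the assumption that the surrounding driver
 * hands out the fd produced by de_heap_export_fd() below (DE_HEAP_EXPORT is
 * a hypothetical ioctl name, not part of this file):
 *
 *   int fd = ioctl(heap_fd, DE_HEAP_EXPORT, &req);
 *   void *va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The mapping is write-combined (pgprot_writecombine() above), so CPU reads
 * are uncached and writes may be buffered.
 */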
static void *de_carveout_kmap(struct dma_buf *dma_buf, unsigned long page)
{
	struct buffer *buffer = dma_buf->priv;
	void *ptr;

	if (!cpu_map) {
		pr_err("%s not allowed (cpu_map is false)\n", __func__);
		return NULL;
	}

	ptr = (void __force *)ioremap(buffer->phys, buffer->size);
	if (!ptr) {
		pr_err("%s:carveout ioremap failed\n", __func__);
		return NULL;
	}

	return ptr;
}

static void de_carveout_kunmap(struct dma_buf *buf, unsigned long page,
			       void *vaddr)
{
	pr_debug("%s\n", __func__);

	if (vaddr)
		iounmap((void __iomem __force *)vaddr);
}

static void *de_carveout_vmap(struct dma_buf *buf)
{
	return de_carveout_kmap(buf, 0);
}

static void de_carveout_vunmap(struct dma_buf *buf, void *kptr)
{
	de_carveout_kunmap(buf, 0, kptr);
}
static const struct dma_buf_ops dmabuf_ops = {
	.attach = NULL, /* optional */
	.detach = NULL, /* optional */
	.map_dma_buf = de_carveout_map_dma,
	.unmap_dma_buf = de_carveout_unmap_dma,
	.release = de_carveout_release,
	.begin_cpu_access = NULL, /* optional */
	.end_cpu_access = NULL, /* optional */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
	.kmap_atomic = de_carveout_kmap_atomic,
	.kunmap_atomic = NULL, /* optional */
	.kmap = de_carveout_kmap,
	.kunmap = de_carveout_kunmap, /* optional */
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
	.map_atomic = de_carveout_kmap_atomic,
	.unmap_atomic = NULL, /* optional */
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
	.map = de_carveout_kmap,
	.unmap = de_carveout_kunmap, /* optional */
#endif
#endif
	.mmap = de_carveout_mmap,
	.vmap = de_carveout_vmap,
	.vunmap = de_carveout_vunmap,
};
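/*
 * The #if ladder above follows the dma_buf_ops churn across kernel versions:
 * kmap/kmap_atomic were renamed to map/map_atomic in 4.12, map_atomic was
 * removed in 4.19, and map/unmap were removed in 5.6, after which kernel
 * access goes through vmap/vunmap only.
 */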
int de_heap_buffer_create(size_t size, unsigned long align, void **private_data)
{
	struct buffer *buffer;
	struct dma_buf *dma_buf;
	int ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
#endif

	pr_info("%s:carveout size %zu\n", __func__, size);

	buffer = kzalloc(sizeof(struct buffer), GFP_KERNEL);
	if (!buffer) {
		pr_err("%s:carveout failed to allocate buffer\n", __func__);
		return -ENOMEM;
	}

	buffer->phys = gen_pool_alloc(heap_pool, size);
	if (!buffer->phys) {
		pr_err("%s:carveout gen_pool_alloc failed for size %zu\n",
		       __func__, size);
		ret = -ENOMEM;
		goto free_buffer;
	}
	buffer->size = size;

	buffer->sg_table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buffer->sg_table) {
		pr_err("%s:carveout failed to allocate sg_table\n", __func__);
		ret = -ENOMEM;
		goto free_alloc;
	}

	ret = sg_alloc_table(buffer->sg_table, 1, GFP_KERNEL);
	if (ret) {
		pr_err("%s:carveout sg_alloc_table failed\n", __func__);
		goto free_sg_table_mem;
	}
	sg_set_page(buffer->sg_table->sgl, pfn_to_page(PFN_DOWN(buffer->phys)),
		    PAGE_ALIGN(buffer->size), 0);

	/* Store dma info */
	buffer->dma_base = buffer->phys;
	if (use_pci) {
		buffer->dma_base -= pool_base;
		buffer->dma_base += pci_offset;
	}
	buffer->dma_size = PAGE_ALIGN(size);

	if (use_sg_dma) {
		/* No mapping yet */
		sg_dma_address(buffer->sg_table->sgl) = (~(dma_addr_t)0);
		sg_dma_len(buffer->sg_table->sgl) = 0;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)
	dma_buf = dma_buf_export(buffer, &dmabuf_ops, size, O_RDWR, NULL);
#else
	exp_info.ops = &dmabuf_ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;
	exp_info.resv = NULL;
	dma_buf = dma_buf_export(&exp_info);
#endif
	if (IS_ERR(dma_buf)) {
		pr_err("%s:carveout dma_buf_export failed\n", __func__);
		ret = PTR_ERR(dma_buf);
		goto free_sg_table;
	}
	buffer->dma_buf = dma_buf;

	*private_data = buffer;

	pr_info("%s:carveout phys address 0x%llx size %zu\n",
		__func__, (unsigned long long)buffer->phys, buffer->size);
	return 0;

free_sg_table:
	sg_free_table(buffer->sg_table);
free_sg_table_mem:
	kfree(buffer->sg_table);
free_alloc:
	gen_pool_free(heap_pool, buffer->phys, buffer->size);
free_buffer:
	kfree(buffer);
	return ret;
}
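/*
 * Caller lifecycle, as inferred from this file (the de_heap.h contract is
 * not shown here):
 *
 *   void *buf;
 *   if (!de_heap_buffer_create(SZ_1M, 0, &buf)) {
 *           int fd = de_heap_export_fd(buf, O_CLOEXEC);
 *           ...
 *           de_heap_buffer_free(buf);    // drops the creator's reference
 *   }
 *
 * The memory returns to the pool in de_carveout_release() once every
 * reference (including exported fds) is gone. Note that the align argument
 * is ignored: granularity is fixed at 4 KiB by POOL_ALLOC_ORDER.
 */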
void de_heap_buffer_free(void *private_data)
{
	struct buffer *buffer = private_data;

	pr_info("%s:carveout phys address 0x%llx size %zu\n",
		__func__, (unsigned long long)buffer->phys, buffer->size);

	dma_buf_put(buffer->dma_buf);
}

int de_heap_export_fd(void *private_data, unsigned long flags)
{
	struct buffer *buffer = private_data;
	struct dma_buf *dma_buf = buffer->dma_buf;
	int ret;

	pr_debug("%s:carveout %p\n", __func__, buffer);

	get_dma_buf(dma_buf);
	ret = dma_buf_fd(dma_buf, flags);
	if (ret < 0) {
		pr_err("%s:carveout dma_buf_fd failed\n", __func__);
		dma_buf_put(dma_buf);
		return ret;
	}

	pr_info("%s:carveout phys address 0x%llx export fd %d\n",
		__func__, (unsigned long long)buffer->phys, ret);
	return ret;
}
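/*
 * get_dma_buf() takes the reference that the new file descriptor will own;
 * on dma_buf_fd() failure it is dropped again, leaving the buffer's
 * refcount unchanged on the error path.
 */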
int de_heap_heap_init(void)
{
	size_t pool_size;
	int ret;

	pr_debug("%s:carveout\n", __func__);

	if (use_pci) {
		unsigned long bar_base, bar_len;

		if (pci_vendor == 0 || pci_product == 0 || pci_bar < 0) {
			pr_err("%s:carveout missing pci parameters\n",
			       __func__);
			return -EFAULT;
		}

		pci_device = pci_get_device(pci_vendor, pci_product, NULL);
		if (pci_device == NULL) {
			pr_err("%s:carveout PCI device not found\n", __func__);
			return -EFAULT;
		}

		bar_base = pci_resource_start(pci_device, pci_bar);
		if (bar_base == 0) {
			pr_err("%s:carveout PCI bar %d not found\n",
			       __func__, pci_bar);
			ret = -EFAULT;
			goto free_pci_device;
		}

		bar_len = pci_resource_len(pci_device, pci_bar);
		if (bar_len == 0) {
			pr_err("%s:carveout PCI bar %d has zero length\n",
			       __func__, pci_bar);
			ret = -EFAULT;
			goto free_pci_device;
		}
		pr_info("%s:carveout PCI bar %d start %#lx length %lu\n",
			__func__, pci_bar, bar_base, bar_len);

		if (pci_size == 0)
			pci_size = bar_len;

		if (pci_offset + pci_size > bar_len) {
			pr_err("%s:carveout pci_offset + pci_size exceeds bar length\n",
			       __func__);
			ret = -EFAULT;
			goto free_pci_device;
		}

		pool_base = bar_base + pci_offset;
		pool_size = pci_size;
	} else {
		pci_device = NULL;

		if (carveout_base == 0) {
			pr_err("%s:carveout carveout_base not defined\n",
			       __func__);
			return -EFAULT;
		}
		if (carveout_size == 0) {
			pr_err("%s:carveout carveout_size not defined\n",
			       __func__);
			return -EFAULT;
		}

		pool_base = carveout_base;
		pool_size = carveout_size;
	}

	heap_pool = gen_pool_create(POOL_ALLOC_ORDER, -1);
	if (!heap_pool) {
		pr_err("%s:carveout gen_pool_create failed\n", __func__);
		ret = -ENOMEM;
		goto free_pci_device;
	}

	ret = gen_pool_add(heap_pool, (unsigned long)pool_base, pool_size, -1);
	if (ret) {
		pr_err("%s:carveout gen_pool_add failed\n", __func__);
		goto free_pool;
	}

	pr_info("%s:carveout base %#llx size %zu\n", __func__,
		(unsigned long long)pool_base, pool_size);
	return 0;

free_pool:
	gen_pool_destroy(heap_pool);
free_pci_device:
	if (pci_device)
		pci_dev_put(pci_device);
	return ret;
}
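/*
 * Either way the result is a genalloc pool with 4 KiB granularity covering
 * [pool_base, pool_base + pool_size); only the device-visible addressing
 * differs between the two modes, which de_heap_buffer_create() compensates
 * for when it derives dma_base.
 */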
void de_heap_heap_deinit(void)
{
	pr_info("%s:carveout\n", __func__);

	gen_pool_destroy(heap_pool);

	if (pci_device)
		pci_dev_put(pci_device);
}

/*
 * coding style for emacs
 *
 * Local variables:
 * indent-tabs-mode: t
 * tab-width: 8
 * c-basic-offset: 8
 * End:
 */