/*!
 *****************************************************************************
 *
 * @File img_mem_anonymous.c
 * ---------------------------------------------------------------------------
 *
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions
 * of GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>

#include <img_mem_man.h>
#include "img_mem_man_priv.h"

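/*
 * This heap back-end imports "anonymous" user memory: the caller hands in a
 * user virtual address (e.g. from malloc) as the buffer handle, the pages
 * are pinned with get_user_pages() and wrapped in an sg_table so the device
 * can DMA to and from them. There is no .alloc path; buffers only enter
 * through .import and leave through .free.
 */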
static int trace_physical_pages;

struct buffer_data {
        struct sg_table *sgt;
        enum img_mem_attr mattr; /* memory attributes */
};

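/*
 * Import a user buffer: buf_hnd carries the page-aligned user virtual
 * address. Pins the pages, builds an sg_table around them and DMA-maps it
 * for the target device.
 */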
static int anonymous_heap_import(struct device *device, struct heap *heap,
                                 size_t size, enum img_mem_attr attr,
                                 uint64_t buf_hnd, struct buffer *buffer)
{
        struct buffer_data *data;
        unsigned long cpu_addr = (unsigned long)buf_hnd;
        struct sg_table *sgt;
        struct page **pages;
        struct scatterlist *sgl;
        int num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        int ret;
        int i;

        pr_debug("%s:%d buffer %d (0x%p) cpu_addr %#lx for PID:%d\n",
                 __func__, __LINE__, buffer->id, buffer,
                 cpu_addr, task_pid_nr(current));

        /* The user address must be page aligned */
        if (cpu_addr & (PAGE_SIZE - 1)) {
                pr_err("%s wrong alignment of %#lx address!\n",
                       __func__, cpu_addr);
                return -EFAULT;
        }

        /* Zeroed so the error path can tell which entries were pinned */
        pages = kmalloc_array(num_pages, sizeof(struct page *),
                              GFP_KERNEL | __GFP_ZERO);
        if (!pages) {
                pr_err("%s failed to allocate memory for pages\n", __func__);
                return -ENOMEM;
        }

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
        down_read(&current->mm->mmap_sem);
#else
        down_read(&current->mm->mmap_lock);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
        /* The "vmas" parameter was removed from get_user_pages() in 6.5 */
        ret = get_user_pages(cpu_addr, num_pages, FOLL_WRITE, pages);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
        ret = get_user_pages(cpu_addr, num_pages, FOLL_WRITE, pages, NULL);
#else
        pr_err("%s get_user_pages not supported for this kernel version\n",
               __func__);
        ret = -1;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
        up_read(&current->mm->mmap_sem);
#else
        up_read(&current->mm->mmap_lock);
#endif
        if (ret != num_pages) {
                pr_err("%s failed to get_user_pages count:%d for %#lx address\n",
                       __func__, num_pages, cpu_addr);
                ret = -ENOMEM;
                goto get_user_pages_failed;
        }

        sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sgt) {
                ret = -ENOMEM;
                /* The pinned pages must still be released */
                goto get_user_pages_failed;
        }

        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                pr_err("%s failed to allocate sgt with num_pages\n", __func__);
                goto alloc_sgt_pages_failed;
        }

        data = kmalloc(sizeof(struct buffer_data), GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto alloc_priv_failed;
        }

        for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
                struct page *page = pages[i];

                sg_set_page(sgl, page, PAGE_SIZE, 0);

                /* Sanity check that the physical address is
                 * accessible from the device's point of view */
                if (~dma_get_mask(device) & sg_phys(sgl)) {
                        pr_err("%s physical address is out of dma_mask, and probably won't be accessible by the core!\n",
                               __func__);
                        ret = -ERANGE;
                        goto dma_mask_check_failed;
                }

                if (trace_physical_pages)
                        pr_info("%s:%d phys %#llx length %d\n",
                                __func__, __LINE__,
                                (unsigned long long)sg_phys(sgl), sgl->length);
        }

        pr_debug("%s:%d buffer %d orig_nents %d\n", __func__, __LINE__,
                 buffer->id, sgt->orig_nents);

        data->sgt = sgt;
        data->mattr = attr;
        buffer->priv = data;

        ret = dma_map_sg(buffer->device, sgt->sgl, sgt->orig_nents,
                         DMA_BIDIRECTIONAL);
        if (ret <= 0) {
                pr_err("%s dma_map_sg failed!\n", __func__);
                /* dma_map_sg returns 0 on failure; don't return "success" */
                ret = -EFAULT;
                goto dma_mask_check_failed;
        }

        kfree(pages);
        return 0;

dma_mask_check_failed:
        buffer->priv = NULL; /* don't leave a dangling pointer to freed data */
        kfree(data);
alloc_priv_failed:
        sg_free_table(sgt);
alloc_sgt_pages_failed:
        kfree(sgt);
get_user_pages_failed:
        for (i = 0; i < num_pages; i++)
                if (pages[i])
                        put_page(pages[i]);
        kfree(pages);
        return ret;
}

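/*
 * Release an imported buffer: DMA-unmap it, drop any kernel mapping, mark
 * the pages dirty if the kernel may have written to them, and unpin them.
 */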
static void anonymous_heap_free(struct heap *heap, struct buffer *buffer)
{
        struct buffer_data *data = buffer->priv;
        struct sg_table *sgt = data->sgt;
        struct scatterlist *sgl;
        bool dirty = false;

        pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
                 buffer->id, buffer);

        dma_unmap_sg(buffer->device, sgt->sgl, sgt->orig_nents,
                     DMA_BIDIRECTIONAL);

        if (buffer->kptr) {
                pr_debug("%s vunmap 0x%p\n", __func__, buffer->kptr);
                dirty = true;
                vunmap(buffer->kptr);
                buffer->kptr = NULL;
        }

        sgl = sgt->sgl;
        while (sgl) {
                struct page *page = sg_page(sgl);

                if (page) {
                        if (dirty)
                                set_page_dirty(page);
                        put_page(page);
                }
                sgl = sg_next(sgl);
        }

        sg_free_table(sgt);
        kfree(sgt);
        kfree(data);
}

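/*
 * Map the pinned pages into kernel space with vmap(), honouring the cache
 * attributes the buffer was imported with (cached by default).
 */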
static int anonymous_heap_map_km(struct heap *heap, struct buffer *buffer)
{
        struct buffer_data *buffer_data = buffer->priv;
        struct sg_table *sgt = buffer_data->sgt;
        struct scatterlist *sgl = sgt->sgl;
        unsigned int num_pages = sg_nents(sgl);
        struct page **pages;
        pgprot_t prot;
        int i;

        pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
                 buffer->id, buffer);

        if (buffer->kptr) {
                pr_warn("%s called for already mapped buffer %d\n",
                        __func__, buffer->id);
                return 0;
        }

        pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                pr_err("%s failed to allocate memory for pages\n", __func__);
                return -ENOMEM;
        }

        prot = PAGE_KERNEL;
        /* CACHED by default */
        if (buffer_data->mattr & IMG_MEM_ATTR_WRITECOMBINE)
                prot = pgprot_writecombine(prot);
        else if (buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)
                prot = pgprot_noncached(prot);

        i = 0;
        while (sgl) {
                pages[i++] = sg_page(sgl);
                sgl = sg_next(sgl);
        }

        buffer->kptr = vmap(pages, num_pages, VM_MAP, prot);
        kfree(pages);
        if (!buffer->kptr) {
                pr_err("%s vmap failed!\n", __func__);
                return -EFAULT;
        }

        pr_debug("%s:%d buffer %d vmap to 0x%p\n", __func__, __LINE__,
                 buffer->id, buffer->kptr);
        return 0;
}

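/*
 * Undo anonymous_heap_map_km(): remove the vmap() mapping and mark all pages
 * dirty, since they may have been written through the kernel mapping.
 */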
static int anonymous_heap_unmap_km(struct heap *heap, struct buffer *buffer)
{
        struct buffer_data *data = buffer->priv;
        struct sg_table *sgt = data->sgt;
        struct scatterlist *sgl;

        pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
                 buffer->id, buffer);

        if (!buffer->kptr) {
                pr_warn("%s called for unmapped buffer %d\n",
                        __func__, buffer->id);
                return 0;
        }

        pr_debug("%s:%d buffer %d kunmap from 0x%p\n", __func__, __LINE__,
                 buffer->id, buffer->kptr);
        vunmap(buffer->kptr);
        buffer->kptr = NULL;

        sgl = sgt->sgl;
        while (sgl) {
                struct page *page = sg_page(sgl);

                if (page)
                        set_page_dirty(page);
                sgl = sg_next(sgl);
        }
        return 0;
}

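/*
 * Hand out the buffer's sg_table; *use_sg_dma = false suggests callers are
 * expected to walk it by physical address rather than via sg_dma_address().
 */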
static int anonymous_get_sg_table(struct heap *heap, struct buffer *buffer,
                                  struct sg_table **sg_table, bool *use_sg_dma)
{
        struct buffer_data *data = buffer->priv;

        *sg_table = data->sgt;
        *use_sg_dma = false;
        return 0;
}

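/*
 * CPU-to-device cache maintenance for cached buffers: flush CPU writes out
 * to memory, then invalidate so the CPU does not hold stale lines while the
 * device owns the buffer. No-op for uncached buffers.
 */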
static void anonymous_sync_cpu_to_dev(struct heap *heap, struct buffer *buffer)
{
        struct buffer_data *buffer_data = buffer->priv;
        struct sg_table *sgt = buffer_data->sgt;

        pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
                 buffer->id, buffer);

        if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)) {
                dma_sync_sg_for_device(buffer->device, sgt->sgl,
                                       sgt->orig_nents, DMA_TO_DEVICE);
                dma_sync_sg_for_cpu(buffer->device, sgt->sgl,
                                    sgt->orig_nents, DMA_FROM_DEVICE);
        }
}

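/* Device-to-CPU cache maintenance: invalidate before the CPU reads. */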
static void anonymous_sync_dev_to_cpu(struct heap *heap, struct buffer *buffer)
{
        struct buffer_data *buffer_data = buffer->priv;
        struct sg_table *sgt = buffer_data->sgt;

        pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
                 buffer->id, buffer);

        if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED))
                dma_sync_sg_for_cpu(buffer->device, sgt->sgl,
                                    sgt->orig_nents, DMA_FROM_DEVICE);
}

static void anonymous_heap_destroy(struct heap *heap)
{
        pr_debug("%s:%d\n", __func__, __LINE__);
}

static struct heap_ops anonymous_heap_ops = {
        .alloc = NULL,
        .import = anonymous_heap_import,
        .free = anonymous_heap_free,
        .map_um = NULL,
        .unmap_um = NULL,
        .map_km = anonymous_heap_map_km,
        .unmap_km = anonymous_heap_unmap_km,
        .get_sg_table = anonymous_get_sg_table,
        .get_page_array = NULL,
        .sync_cpu_to_dev = anonymous_sync_cpu_to_dev,
        .sync_dev_to_cpu = anonymous_sync_dev_to_cpu,
        .set_offset = NULL,
        .destroy = anonymous_heap_destroy,
};

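/* Entry point used by img_mem_man to attach this back-end to a heap. */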
int img_mem_anonymous_init(const struct heap_config *heap_cfg, struct heap *heap)
{
        pr_debug("%s:%d\n", __func__, __LINE__);
        heap->ops = &anonymous_heap_ops;
        return 0;
}

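/*
 * Usage sketch, illustrative only: img_mem_add_heap() and img_mem_import()
 * below are assumed entry points of img_mem_man, named for illustration
 * rather than taken from this tree. The flow matches what this file
 * implements: register an anonymous heap, then import a page-aligned user
 * virtual address as the buffer handle.
 *
 *      struct heap_config cfg = {
 *              .type = IMG_MEM_HEAP_TYPE_ANONYMOUS,  (hypothetical type id)
 *      };
 *      int heap_id, buf_id;
 *
 *      img_mem_add_heap(&cfg, &heap_id);
 *      img_mem_import(dev, heap_id, size, IMG_MEM_ATTR_WRITECOMBINE,
 *                     (uint64_t)user_va, &buf_id);
 *
 * anonymous_heap_import() rejects user_va with -EFAULT unless it is page
 * aligned.
 */
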
/*
 * coding style for emacs
 *
 * Local variables:
 * indent-tabs-mode: t
 * tab-width: 8
 * c-basic-offset: 8
 * End:
 */