/*!
 *****************************************************************************
 *
 * @File img_mem_carveout.c
 * ---------------------------------------------------------------------------
 *
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/dma-buf.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/genalloc.h>
#include <linux/version.h>

#include <asm/cacheflush.h>

#include <img_mem_man.h>
#include "img_mem_man_priv.h"

/* Default allocation order */
#define POOL_ALLOC_ORDER_BASE PAGE_SHIFT

struct heap_data {
	struct gen_pool *pool;
};

struct buffer_data {
	unsigned long addr; /* addr returned by genalloc */
	uint64_t *addrs; /* array of physical addresses, upcast to 64-bit */
	enum img_mem_attr mattr; /* memory attributes */
	struct vm_area_struct *mapped_vma; /* Needed for cache manipulation */

	/* exporter via dmabuf */
	struct sg_table *sgt;
	bool exported;
	struct dma_buf *dma_buf;
	dma_addr_t dma_base;
	unsigned int dma_size;
};
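
/* Debug switches (plain statics, not module parameters): when set non-zero,
 * physical page addresses and mmap faults are traced via printk.
 */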
static int trace_physical_pages;
static int trace_mmap_fault;

/*
 * dmabuf wrapper ops
 */
static struct sg_table *carveout_map_dmabuf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct buffer *buffer = attach->dmabuf->priv;
	struct buffer_data *buffer_data;

	if (!buffer)
		return NULL;

	pr_debug("%s\n", __func__);

	buffer_data = buffer->priv;

	sg_dma_address(buffer_data->sgt->sgl) = buffer_data->dma_base;
	sg_dma_len(buffer_data->sgt->sgl) = buffer_data->dma_size;

	return buffer_data->sgt;
}

static void carveout_unmap_dmabuf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	struct buffer *buffer = attach->dmabuf->priv;
	struct buffer_data *buffer_data;

	if (!buffer)
		return;

	pr_debug("%s\n", __func__);

	buffer_data = buffer->priv;

	sg_dma_address(buffer_data->sgt->sgl) = (~(dma_addr_t)0);
	sg_dma_len(buffer_data->sgt->sgl) = 0;
}

/* Called when the dma-buf ref counter reaches zero! */
static void carveout_release_dmabuf(struct dma_buf *buf)
{
	struct buffer *buffer = buf->priv;
	struct buffer_data *buffer_data;

	if (!buffer)
		return;

	buffer_data = buffer->priv;
	pr_debug("%s %p\n", __func__, buffer_data);
	if (!buffer_data)
		return;

	buffer_data->exported = false;
}

/* Called on file descriptor mmap */
static int carveout_mmap_dmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct buffer *buffer = buf->priv;
	struct buffer_data *buffer_data;
	struct scatterlist *sgl;
	unsigned long addr;

	if (!buffer)
		return -EINVAL;

	buffer_data = buffer->priv;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);
	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %#lx\n",
		 __func__, __LINE__,
		 vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	sgl = buffer_data->sgt->sgl;
	addr = vma->vm_start;
	while (sgl) {
		dma_addr_t phys = sg_phys(sgl);
		unsigned long pfn = phys >> PAGE_SHIFT;
		unsigned int len = sgl->length;
		int ret;

		if (vma->vm_end < (addr + len)) {
			unsigned long size = vma->vm_end - addr;

			pr_debug("%s:%d buffer %d (0x%p) truncating len=%#x to size=%#lx\n",
				 __func__, __LINE__,
				 buffer->id, buffer, len, size);
			WARN(round_up(size, PAGE_SIZE) != size,
			     "VMA size %#lx not page aligned\n", size);
			len = size;
			if (!len) /* VM space is smaller than allocation */
				break;
		}

		ret = remap_pfn_range(vma, addr, pfn, len, vma->vm_page_prot);
		if (ret)
			return ret;

		addr += len;
		sgl = sg_next(sgl);
	}

	return 0;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
static void *carveout_kmap_dmabuf(struct dma_buf *buf, unsigned long page)
{
	pr_err("%s not supported\n", __func__);
	return NULL;
}
#endif

static int carveout_heap_map_km(struct heap *heap, struct buffer *buffer);
static int carveout_heap_unmap_km(struct heap *heap, struct buffer *buffer);

static void *carveout_vmap_dmabuf(struct dma_buf *buf)
{
	struct buffer *buffer = buf->priv;
	struct heap *heap;

	if (!buffer)
		return NULL;

	heap = buffer->heap;

	if (carveout_heap_map_km(heap, buffer))
		return NULL;

	pr_debug("%s:%d buffer %d kptr 0x%p\n", __func__, __LINE__,
		 buffer->id, buffer->kptr);

	return buffer->kptr;
}

static void carveout_vunmap_dmabuf(struct dma_buf *buf, void *kptr)
{
	struct buffer *buffer = buf->priv;
	struct heap *heap;

	if (!buffer)
		return;

	heap = buffer->heap;

	pr_debug("%s:%d buffer %d kptr 0x%p (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer->kptr, kptr);

	if (buffer->kptr == kptr)
		carveout_heap_unmap_km(heap, buffer);
}

static const struct dma_buf_ops carveout_dmabuf_ops = {
	.map_dma_buf = carveout_map_dmabuf,
	.unmap_dma_buf = carveout_unmap_dmabuf,
	.release = carveout_release_dmabuf,
	.mmap = carveout_mmap_dmabuf,
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
	.kmap_atomic = carveout_kmap_dmabuf,
	.kmap = carveout_kmap_dmabuf,
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)
	.map_atomic = carveout_kmap_dmabuf,
	.map = carveout_kmap_dmabuf,
#endif
#endif
	.vmap = carveout_vmap_dmabuf,
	.vunmap = carveout_vunmap_dmabuf,
};
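
/* Export a carveout buffer as a dma-buf: on first export a single-entry
 * sg_table describing the physically contiguous block is created and kept
 * for reuse; the new file descriptor is returned through *buf_hnd.
 */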
static int carveout_heap_export(struct device *device, struct heap *heap,
				size_t size, enum img_mem_attr attr,
				struct buffer *buffer, uint64_t *buf_hnd)
{
	struct buffer_data *buffer_data = buffer->priv;
	struct dma_buf *dma_buf;
	int ret, fd;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
#endif

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);

	if (!buffer_data)
		/* Nothing to export? */
		return -ENOMEM;

	if (buffer_data->exported) {
		pr_err("%s: already exported!\n", __func__);
		return -EBUSY;
	}

	if (!buffer_data->sgt) {
		/* Create for the very first time */
		buffer_data->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!buffer_data->sgt) {
			pr_err("%s: failed to allocate sg_table\n", __func__);
			return -ENOMEM;
		}

		ret = sg_alloc_table(buffer_data->sgt, 1, GFP_KERNEL);
		if (ret) {
			pr_err("%s: sg_alloc_table failed\n", __func__);
			goto free_sgt_mem;
		}
		sg_set_page(buffer_data->sgt->sgl,
			    pfn_to_page(PFN_DOWN(buffer_data->addr +
						 heap->options.carveout.offs)),
			    PAGE_ALIGN(size), 0);

		/* Store dma info */
		if (heap->to_dev_addr)
			buffer_data->dma_base = heap->to_dev_addr(&heap->options,
					buffer_data->addr +
					heap->options.carveout.offs);
		else
			buffer_data->dma_base = buffer_data->addr +
					heap->options.carveout.offs;
		buffer_data->dma_size = PAGE_ALIGN(size);

		/* No mapping yet */
		sg_dma_address(buffer_data->sgt->sgl) = (~(dma_addr_t)0);
		sg_dma_len(buffer_data->sgt->sgl) = 0;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
	dma_buf = dma_buf_export(buffer, &carveout_dmabuf_ops,
				 size, O_RDWR);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
	dma_buf = dma_buf_export(buffer, &carveout_dmabuf_ops,
				 size, O_RDWR, NULL);
#else
	exp_info.ops = &carveout_dmabuf_ops;
	exp_info.size = size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;
	exp_info.resv = NULL;
	dma_buf = dma_buf_export(&exp_info);
#endif
	if (IS_ERR(dma_buf)) {
		pr_err("%s: dma_buf_export failed\n", __func__);
		ret = PTR_ERR(dma_buf);
		return ret;
	}

	get_dma_buf(dma_buf);
	fd = dma_buf_fd(dma_buf, 0);
	if (fd < 0) {
		pr_err("%s: dma_buf_fd failed\n", __func__);
		dma_buf_put(dma_buf);
		return -EFAULT;
	}

	buffer_data->dma_buf = dma_buf;
	buffer_data->exported = true;
	*buf_hnd = (uint64_t)fd;

	return 0;

free_sgt_mem:
	kfree(buffer_data->sgt);
	buffer_data->sgt = NULL;

	return ret;
}
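
/* Allocate a physically contiguous block from the genalloc pool and record
 * the physical address of each page in buffer_data->addrs for later
 * kernel/user-space mapping.
 */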
static int carveout_heap_alloc(struct device *device, struct heap *heap,
			       size_t size, enum img_mem_attr attr,
			       struct buffer *buffer)
{
	struct heap_data *heap_data = heap->priv;
	struct buffer_data *buffer_data;
	phys_addr_t phys_addr;
	size_t pages, page;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);

	buffer_data = kzalloc(sizeof(struct buffer_data), GFP_KERNEL);
	if (!buffer_data)
		return -ENOMEM;

	pages = size / PAGE_SIZE;
	buffer_data->addrs = kmalloc_array(pages, sizeof(uint64_t), GFP_KERNEL);
	if (!buffer_data->addrs) {
		kfree(buffer_data);
		return -ENOMEM;
	}

	buffer_data->mattr = attr;
	buffer_data->addr = gen_pool_alloc(heap_data->pool, size);
	if (!buffer_data->addr) {
		pr_err("%s gen_pool_alloc failed!\n", __func__);
		kfree(buffer_data->addrs);
		kfree(buffer_data);
		return -ENOMEM;
	}

	/* The pool is set up with a 1:1 virt == phys mapping (see init),
	 * so this simply recovers the physical address behind
	 * buffer_data->addr plus the configured offset.
	 */
	phys_addr = gen_pool_virt_to_phys(heap_data->pool,
			buffer_data->addr + heap->options.carveout.offs);
	page = 0;
	while (page < pages) {
		if (trace_physical_pages)
			pr_info("%s phys %llx\n",
				__func__, (unsigned long long)phys_addr);
		buffer_data->addrs[page++] = phys_addr;
		phys_addr += PAGE_SIZE;
	}

	buffer->priv = buffer_data;

	pr_debug("%s buffer %d phys %#llx size %zu attrs %x\n", __func__,
		 buffer->id,
		 (unsigned long long)buffer_data->addrs[0],
		 size,
		 attr);
	return 0;
}
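
/* Release a buffer: drop any leftover kernel mapping, drop the local dma-buf
 * reference (if the buffer was exported), free the sg_table and return the
 * block to the pool.
 */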
static void carveout_heap_free(struct heap *heap, struct buffer *buffer)
{
	struct heap_data *heap_data = heap->priv;
	struct buffer_data *buffer_data = buffer->priv;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);

	/* In case the caller forgot to unmap the kernel mapping */
	if (heap->options.carveout.put_kptr && buffer->kptr) {
		heap->options.carveout.put_kptr(buffer->kptr);
		buffer->kptr = NULL;
	}
	if (buffer_data->dma_buf) {
		/* Unlink before dropping the reference: the put may free
		 * the dma_buf if the exported fd has already been closed. */
		buffer_data->dma_buf->priv = NULL;
		dma_buf_put(buffer_data->dma_buf);
	}
	if (buffer_data->sgt) {
		sg_free_table(buffer_data->sgt);
		kfree(buffer_data->sgt);
		buffer_data->sgt = NULL;
	}
	if (buffer_data->mapped_vma)
		buffer_data->mapped_vma->vm_private_data = NULL;

	gen_pool_free(heap_data->pool, buffer_data->addr, buffer->actual_size);
	kfree(buffer_data->addrs);
	kfree(buffer_data);
}
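
/* vm_operations open/close callbacks: track the user-space VMA of a mapped
 * buffer so the cache sync helpers below know which range to operate on.
 */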
static void _mmap_open(struct vm_area_struct *vma)
{
	struct buffer *buffer = vma->vm_private_data;
	struct buffer_data *buffer_data = buffer->priv;

	buffer_data->mapped_vma = vma;

	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
		 __func__, __LINE__, buffer->id, buffer, vma);
}

static void _mmap_close(struct vm_area_struct *vma)
{
	struct buffer *buffer = vma->vm_private_data;
	struct buffer_data *buffer_data;

	if (!buffer)
		return;

	buffer_data = buffer->priv;

	pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
		 __func__, __LINE__, buffer->id, buffer, vma);

	buffer_data->mapped_vma = NULL;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
static vm_fault_t _mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
#else
static int _mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#endif
	struct buffer *buffer = vma->vm_private_data;
	struct buffer_data *buffer_data = buffer->priv;
	phys_addr_t phys_addr;
	pgoff_t offset;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
	unsigned long addr = vmf->address;
#else
	unsigned long addr = (unsigned long)vmf->virtual_address;
#endif

	if (trace_mmap_fault) {
		pr_debug("%s:%d buffer %d (0x%p) vma:%p\n",
			 __func__, __LINE__, buffer->id, buffer, vma);
		pr_debug("%s:%d vm_start %#lx vm_end %#lx total size %ld\n",
			 __func__, __LINE__,
			 vma->vm_start, vma->vm_end,
			 vma->vm_end - vma->vm_start);
	}

	offset = (addr - vma->vm_start) >> PAGE_SHIFT;
	if (offset >= (buffer->actual_size / PAGE_SIZE)) {
		pr_err("%s:%d offs:%lu\n",
		       __func__, __LINE__, offset);
		return VM_FAULT_SIGBUS;
	}
	phys_addr = buffer_data->addrs[0] + (offset * PAGE_SIZE);

	if (trace_mmap_fault)
		pr_info("%s:%d vmf pgoff %#lx vmf addr %lx offs:%lu phys:%#llx\n",
			__func__, __LINE__, vmf->pgoff, addr, offset,
			(unsigned long long)phys_addr);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
	return vmf_insert_pfn(vma, addr, phys_addr >> PAGE_SHIFT);
#else
	{
		int err = vm_insert_pfn(vma, addr, phys_addr >> PAGE_SHIFT);

		switch (err) {
		case 0:
		case -EAGAIN:
		case -ERESTARTSYS:
		case -EINTR:
		case -EBUSY:
			return VM_FAULT_NOPAGE;
		case -ENOMEM:
			return VM_FAULT_OOM;
		}
		return VM_FAULT_SIGBUS;
	}
#endif
}

/* The vma ops->fault handler is used to track user space mappings
 * (inspired by other gpu/drm drivers from the kernel source tree)
 * so that cache handling ops can be called properly when the mapping is
 * destroyed (when the user calls the munmap syscall).
 * The vma flags are used to choose the correct direction.
 * The above facts allow us to do automatic cache flushing/invalidation.
 *
 * Examples:
 * mmap() -> .open -> invalidate buffer cache
 * .. read content from buffer
 * unmap() -> .close -> do nothing
 *
 * mmap() -> .open -> do nothing
 * .. write content to buffer
 * unmap() -> .close -> flush buffer cache
 */
static struct vm_operations_struct carveout_mmap_vm_ops = {
	.open = _mmap_open,
	.close = _mmap_close,
	.fault = _mmap_fault,
};
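
/* Map a buffer into user space. No pages are inserted here: the VMA is
 * marked VM_PFNMAP and pages are faulted in on demand by _mmap_fault(),
 * honouring the buffer's cache attributes.
 */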
static int carveout_heap_map_um(struct heap *heap, struct buffer *buffer,
				struct vm_area_struct *vma)
{
	struct buffer_data *buffer_data = buffer->priv;

	pr_debug("%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
		 buffer->id, buffer);
	pr_debug("%s:%d vm_start %#lx vm_end %#lx size %ld\n",
		 __func__, __LINE__,
		 vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start);

	/* CACHED by default */
	if (buffer_data->mattr & IMG_MEM_ATTR_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	else if (buffer_data->mattr & IMG_MEM_ATTR_WRITECOMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	vma->vm_ops = &carveout_mmap_vm_ops;
	vma->vm_flags |= VM_PFNMAP;
	vma->vm_private_data = buffer;
	vma->vm_pgoff = 0;

	_mmap_open(vma);

	return 0;
}
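
/* Map a buffer into the kernel, either through the heap's dynamic
 * get_kptr()/put_kptr() hooks or by offsetting the static kernel mapping
 * (options.carveout.kptr) of the whole carveout region.
 */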
static int carveout_heap_map_km(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *buffer_data = buffer->priv;

	if (buffer->kptr) {
		pr_warn("%s called for already mapped buffer %d\n",
			__func__, buffer->id);
		return 0;
	}

	if (heap->options.carveout.get_kptr)
		buffer->kptr = heap->options.carveout.get_kptr(
				buffer_data->addrs[0],
				buffer->actual_size,
				buffer_data->mattr);
	else if (heap->options.carveout.kptr)
		buffer->kptr = heap->options.carveout.kptr +
				(buffer_data->addrs[0] -
				 heap->options.carveout.phys);
	else
		return -ENOMEM;

	if (!buffer->kptr)
		return -ENOMEM;

	pr_debug("%s:%d buffer %d (0x%p) kptr 0x%p size:%zu\n",
		 __func__, __LINE__,
		 buffer->id, buffer, buffer->kptr, buffer->actual_size);
	return 0;
}

static int carveout_heap_unmap_km(struct heap *heap, struct buffer *buffer)
{
	pr_debug("%s:%d buffer %d (0x%p) kptr 0x%p\n", __func__, __LINE__,
		 buffer->id, buffer, buffer->kptr);

	if (!buffer->kptr) {
		pr_warn("%s called for unmapped buffer %d\n",
			__func__, buffer->id);
		return 0;
	}

	if (heap->options.carveout.put_kptr)
		heap->options.carveout.put_kptr(buffer->kptr);

	buffer->kptr = NULL;

	return 0;
}

static int carveout_heap_get_page_array(struct heap *heap,
					struct buffer *buffer,
					uint64_t **addrs)
{
	struct buffer_data *buffer_data = buffer->priv;

	*addrs = buffer_data->addrs;
	return 0;
}

static int carveout_set_offset(struct heap *heap, size_t offs)
{
	if (offs > heap->options.carveout.size) {
		pr_err("%s offset exceeds size!\n", __func__);
		return -EINVAL;
	}

	heap->options.carveout.offs = offs;
	return 0;
}

static void carveout_cache_update(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	pr_debug("%s vma start:%lx end:%lx\n",
		 __func__, vma->vm_start, vma->vm_end);

#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#else
	/* flush_cache_range() is not exported to modules by the ARM kernel. */
	/* Tentative for the SFF, this function is exported by the kernel... */
	/* vivt_flush_cache_range(vma, vma->vm_start, vma->vm_end); */
#endif
}
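
/* Cache maintenance hooks: for cacheable buffers, flush the CPU cache over
 * the tracked user-space mapping (a no-op for uncached buffers or when no
 * mapping exists).
 */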
static void carveout_sync_cpu_to_dev(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *buffer_data = buffer->priv;

	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED))
		carveout_cache_update(buffer_data->mapped_vma);
}

static void carveout_sync_dev_to_cpu(struct heap *heap, struct buffer *buffer)
{
	struct buffer_data *buffer_data = buffer->priv;

	if (!(buffer_data->mattr & IMG_MEM_ATTR_UNCACHED))
		carveout_cache_update(buffer_data->mapped_vma);
}

static void carveout_heap_destroy(struct heap *heap)
{
	struct heap_data *heap_data = heap->priv;

	pr_debug("%s:%d\n", __func__, __LINE__);

	gen_pool_destroy(heap_data->pool);
	kfree(heap_data);
}

static struct heap_ops carveout_heap_ops = {
	.export = carveout_heap_export,
	.alloc = carveout_heap_alloc,
	.import = NULL,
	.free = carveout_heap_free,
	.map_um = carveout_heap_map_um,
	.unmap_um = NULL,
	.map_km = carveout_heap_map_km,
	.unmap_km = carveout_heap_unmap_km,
	.get_sg_table = NULL,
	.get_page_array = carveout_heap_get_page_array,
	.sync_cpu_to_dev = carveout_sync_cpu_to_dev,
	.sync_dev_to_cpu = carveout_sync_dev_to_cpu,
	.set_offset = carveout_set_offset,
	.destroy = carveout_heap_destroy,
};
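
/*
 * Illustrative example (not part of this file): how a platform integration
 * might describe a carveout heap before calling img_mem_carveout_init().
 * The exact layout of struct heap_config comes from img_mem_man.h; the
 * get_kptr()/put_kptr() hook signatures below are only inferred from the
 * calls in carveout_heap_map_km(), and the ioremap_wc()-based implementation
 * is just one possible choice, not the required one.
 *
 *	static void *example_get_kptr(uint64_t addr, size_t size,
 *				      enum img_mem_attr mattr)
 *	{
 *		return (void *)ioremap_wc(addr, size);
 *	}
 *
 *	static void example_put_kptr(void *kptr)
 *	{
 *		iounmap((void __iomem *)kptr);
 *	}
 *
 *	struct heap_config example_cfg = {
 *		.options.carveout = {
 *			.phys = 0x80000000,
 *			.size = 16 * 1024 * 1024,
 *			.offs = 0,
 *			.pool_order = 0,
 *			.get_kptr = example_get_kptr,
 *			.put_kptr = example_put_kptr,
 *		},
 *	};
 */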
int img_mem_carveout_init(const struct heap_config *config, struct heap *heap)
{
	struct heap_data *heap_data;
	unsigned long virt_start;
	int ret;
	int pool_order = POOL_ALLOC_ORDER_BASE +
		heap->options.carveout.pool_order;

	if (heap->options.carveout.offs > heap->options.carveout.size) {
		pr_err("%s offset exceeds size!\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s phys base:%#llx kptr %p (offs:%llx) size:%zu order:%d\n",
		 __func__,
		 (unsigned long long)heap->options.carveout.phys,
		 heap->options.carveout.kptr,
		 (unsigned long long)heap->options.carveout.offs,
		 heap->options.carveout.size,
		 pool_order);

	if (config->options.carveout.kptr &&
	    (heap->options.carveout.put_kptr || heap->options.carveout.get_kptr)) {
		pr_err("%s can't use static & dynamic kernel mapping at the same time!\n",
		       __func__);
		return -EINVAL;
	}

	if (!config->options.carveout.kptr &&
	    !(heap->options.carveout.put_kptr && heap->options.carveout.get_kptr)) {
		pr_warn("%s no kernel mapping method available!\n",
			__func__);
		return -EINVAL;
	}

	if (heap->options.carveout.phys & ((1 << pool_order) - 1)) {
		pr_err("%s phys addr (%#llx) is not aligned to allocation order!\n",
		       __func__, (unsigned long long)heap->options.carveout.phys);
		return -EINVAL;
	}

	if (heap->options.carveout.size == 0) {
		pr_err("%s size cannot be zero!\n", __func__);
		return -EINVAL;
	}

	heap_data = kmalloc(sizeof(struct heap_data), GFP_KERNEL);
	if (!heap_data)
		return -ENOMEM;

	heap_data->pool = gen_pool_create(pool_order, -1);
	if (!heap_data->pool) {
		pr_err("%s gen_pool_create failed\n", __func__);
		ret = -ENOMEM;
		goto pool_create_failed;
	}

	/* Operating in no-offset mode -> virtual == phys.
	 * However, when the physical address is 0 (unlikely) we need to
	 * distinguish whether the address returned from gen_pool_alloc is
	 * an error or a valid address, so add a constant offset.
	 */
	virt_start = (unsigned long)heap->options.carveout.phys;
	if (!virt_start)
		virt_start = 1 << pool_order;

	ret = gen_pool_add_virt(heap_data->pool, virt_start,
				heap->options.carveout.phys,
				heap->options.carveout.size,
				-1);
	if (ret) {
		pr_err("%s gen_pool_add_virt failed\n", __func__);
		goto pool_add_failed;
	}

	heap->ops = &carveout_heap_ops;
	heap->priv = heap_data;

	return 0;

pool_add_failed:
	gen_pool_destroy(heap_data->pool);
pool_create_failed:
	kfree(heap_data);
	return ret;
}

/*
 * coding style for emacs
 *
 * Local variables:
 * indent-tabs-mode: t
 * tab-width: 8
 * c-basic-offset: 8
 * End:
 */