lima_gem.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/mm.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-mapping.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include <drm/lima_drm.h>

#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"
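
/*
 * Grow the backing storage of a heap BO. Heap buffers start at
 * lima_heap_init_nr_pages and double in size on each call, capped at the
 * GEM object size, so the GPU can get more memory on demand instead of
 * allocating everything up front. Newly added pages are mapped into @vm,
 * when one is given, starting right after the old heap end.
 */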
int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
{
	struct page **pages;
	struct address_space *mapping = bo->base.base.filp->f_mapping;
	struct device *dev = bo->base.base.dev->dev;
	size_t old_size = bo->heap_size;
	size_t new_size = bo->heap_size ? bo->heap_size * 2 :
		(lima_heap_init_nr_pages << PAGE_SHIFT);
	struct sg_table sgt;
	int i, ret;

	if (bo->heap_size >= bo->base.base.size)
		return -ENOSPC;

	new_size = min(new_size, bo->base.base.size);

	mutex_lock(&bo->base.pages_lock);
	if (bo->base.pages) {
		pages = bo->base.pages;
	} else {
		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			mutex_unlock(&bo->base.pages_lock);
			return -ENOMEM;
		}

		bo->base.pages = pages;
		bo->base.pages_use_count = 1;

		mapping_set_unevictable(mapping);
	}

	for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);

		if (IS_ERR(page)) {
			mutex_unlock(&bo->base.pages_lock);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}

	mutex_unlock(&bo->base.pages_lock);

	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
					new_size, GFP_KERNEL);
	if (ret)
		return ret;

	if (bo->base.sgt) {
		dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
		sg_free_table(bo->base.sgt);
	} else {
		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
		if (!bo->base.sgt) {
			sg_free_table(&sgt);
			return -ENOMEM;
		}
	}

	ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(&sgt);
		kfree(bo->base.sgt);
		bo->base.sgt = NULL;
		return ret;
	}

	*bo->base.sgt = sgt;

	if (vm) {
		ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
		if (ret)
			return ret;
	}

	bo->heap_size = new_size;
	return 0;
}
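
/*
 * Allocate a shmem-backed GEM object and create a handle for it. The page
 * allocation mask is restricted to GFP_DMA32 because Mali Utgard only has
 * 32-bit GPU addresses. Heap BOs get their initial chunk of pages here;
 * regular BOs are fully populated up front.
 */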
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
			   u32 size, u32 flags, u32 *handle)
{
	int err;
	gfp_t mask;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	bool is_heap = flags & LIMA_BO_FLAG_HEAP;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	obj = &shmem->base;

	/* Mali Utgard GPUs can only address a 32-bit address space */
	mask = mapping_gfp_mask(obj->filp->f_mapping);
	mask &= ~__GFP_HIGHMEM;
	mask |= __GFP_DMA32;
	mapping_set_gfp_mask(obj->filp->f_mapping, mask);

	if (is_heap) {
		bo = to_lima_bo(obj);
		err = lima_heap_alloc(bo, NULL);
		if (err)
			goto out;
	} else {
		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);

		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto out;
		}
	}

	err = drm_gem_handle_create(file, obj, handle);

out:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return err;
}
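
/* A BO must not reach final free while it still has GPU VA mappings. */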
static void lima_gem_free_object(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (!list_empty(&bo->va))
		dev_err(obj->dev->dev, "lima gem free bo still has va\n");

	drm_gem_shmem_free_object(obj);
}
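
/* Map the BO into the opening file's VM as soon as it gains a handle. */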
static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	return lima_vm_bo_add(vm, bo, true);
}
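
/* Drop the per-file VM mapping when the handle goes away. */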
static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	lima_vm_bo_del(vm, bo);
}
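
/*
 * Heap BOs are only partially backed and their pages change as the heap
 * grows, so they cannot be pinned, kernel-mapped or mmap'ed to userspace;
 * the wrappers below reject them before delegating to the shmem helpers.
 */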
static int lima_gem_pin(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_pin(obj);
}

static void *lima_gem_vmap(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return ERR_PTR(-EINVAL);

	return drm_gem_shmem_vmap(obj);
}

static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_mmap(obj, vma);
}
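
/*
 * GEM object ops: lima's heap-aware wrappers where needed, generic shmem
 * helpers for everything else.
 */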
static const struct drm_gem_object_funcs lima_gem_funcs = {
	.free = lima_gem_free_object,
	.open = lima_gem_object_open,
	.close = lima_gem_object_close,
	.print_info = drm_gem_shmem_print_info,
	.pin = lima_gem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = lima_gem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = lima_gem_mmap,
};
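
/*
 * Driver hook used by the DRM core to allocate our lima_bo wrapper around
 * the shmem GEM object.
 */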
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
{
	struct lima_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	mutex_init(&bo->lock);
	INIT_LIST_HEAD(&bo->va);

	bo->base.base.funcs = &lima_gem_funcs;

	return &bo->base.base;
}
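
/*
 * Report a BO's GPU virtual address in this file's VM and the fake mmap
 * offset userspace needs to CPU-map it.
 */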
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	bo = to_lima_bo(obj);

	*va = lima_vm_get_va(vm, bo);

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put(obj);
	return 0;
}
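
/*
 * Collect implicit-sync dependencies for one BO: reserve a shared fence
 * slot for readers and, unless the submit uses explicit fencing, pull the
 * BO's reservation fences into the task's dependency array.
 */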
static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
			    bool write, bool explicit)
{
	int err = 0;

	if (!write) {
		err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
		if (err)
			return err;
	}

	/* explicit sync relies on user-passed dependency fences instead */
	if (explicit)
		return 0;

	return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
}
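
/*
 * Resolve each input syncobj of the submit to a fence and add it to the
 * task's dependency array.
 */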
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
		struct dma_fence *fence = NULL;

		if (!submit->in_sync[i])
			continue;

		err = drm_syncobj_find_fence(file, submit->in_sync[i],
					     0, 0, &fence);
		if (err)
			return err;

		err = drm_gem_fence_array_add(&submit->task->deps, fence);
		if (err) {
			dma_fence_put(fence);
			return err;
		}
	}

	return 0;
}
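
/*
 * Job submission: look up all referenced BOs and pin their VM mappings,
 * lock their reservations, gather explicit (syncobj) and implicit
 * (reservation) dependencies, queue the task to the scheduler, then
 * publish the task's fence to each BO's reservation object and to the
 * optional output syncobj.
 */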
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/* increase the refcount of the GPU VA mapping so it cannot be
		 * unmapped while the task executes; dropped when the task is done
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	fence = lima_sched_context_queue_task(
		submit->ctx->context + submit->pipe, submit->task);

	for (i = 0; i < submit->nr_bos; i++) {
		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
		else
			dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
	}

	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);

	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put(&bos[i]->base.base);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);
err_out0:
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put(&bos[i]->base.base);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}
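
/*
 * Wait for the fences on a BO's reservation object. A zero timeout turns
 * the wait into a poll, so -ETIME becomes -EBUSY in that case and
 * -ETIMEDOUT otherwise.
 */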
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
{
	bool write = op & LIMA_GEM_WAIT_WRITE;
	long ret, timeout;

	if (!op)
		return 0;

	timeout = drm_timeout_abs_to_jiffies(timeout_ns);

	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
	if (ret == -ETIME)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	return ret;
}