// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Etnaviv Project
 */

#include <drm/drm_file.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"

/*
 * Cmdstream submission:
 */

#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
#define BO_LOCKED	0x4000
#define BO_PINNED	0x2000
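
/*
 * Allocate a submit object. The per-BO slots are appended to the struct
 * itself (size_vstruct() computes the combined size), while the perfmon
 * request array is a separate allocation sized by the caller.
 */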
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
		struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
{
	struct etnaviv_gem_submit *submit;
	size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));

	submit = kzalloc(sz, GFP_KERNEL);
	if (!submit)
		return NULL;

	submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
			       GFP_KERNEL);
	if (!submit->pmrs) {
		kfree(submit);
		return NULL;
	}
	submit->nr_pmrs = nr_pmrs;

	submit->gpu = gpu;
	kref_init(&submit->refcount);

	return submit;
}
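
/*
 * Resolve the userspace BO handles to GEM objects and take a reference on
 * each. The whole lookup runs under a single table_lock, so object_idr is
 * hit directly instead of going through drm_gem_object_lookup(). Softpin
 * addresses are range-checked here as well.
 */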
static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
		struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
		unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;
		if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
			if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
				DRM_ERROR("invalid softpin address\n");
				ret = -EINVAL;
				goto out_unlock;
			}
			submit->bos[i].va = bo->presumed;
		}

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				  bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a refcount on the object. The file table lock
		 * prevents the object_idr's refcount on this being dropped.
		 */
		drm_gem_object_get(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}
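
/* Drop the reservation lock on a BO if this submit is still holding it. */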
static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
	if (submit->bos[i].flags & BO_LOCKED) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		dma_resv_unlock(obj->resv);
		submit->bos[i].flags &= ~BO_LOCKED;
	}
}
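
/*
 * Lock all reservation objects using the ww_mutex protocol: on -EDEADLK we
 * back off by unlocking everything we hold, slow-lock the contended BO and
 * retry the whole loop with that lock already taken.
 */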
static int submit_lock_objects(struct etnaviv_gem_submit *submit,
		struct ww_acquire_ctx *ticket)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(obj->resv, ticket);
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct drm_gem_object *obj;

		obj = &submit->bos[contended].obj->base;

		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
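
/*
 * Set up implicit synchronization: for read-only BOs reserve a shared fence
 * slot, and unless ETNA_SUBMIT_NO_IMPLICIT is set, collect the fences this
 * job has to wait for from each reservation object (all fences for write
 * BOs, only the exclusive fence for read BOs).
 */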
static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		struct dma_resv *robj = bo->obj->base.resv;

		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
			ret = dma_resv_reserve_shared(robj, 1);
			if (ret)
				return ret;
		}

		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
			continue;

		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
			ret = dma_resv_get_fences_rcu(robj, &bo->excl,
						      &bo->nr_shared,
						      &bo->shared);
			if (ret)
				return ret;
		} else {
			bo->excl = dma_resv_get_excl_rcu(robj);
		}
	}

	return ret;
}
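
/*
 * Publish the job's out-fence on every BO: as an exclusive fence for BOs
 * written by the GPU, as a shared fence otherwise, then drop the
 * reservation locks.
 */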
static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(obj->resv,
						submit->out_fence);
		else
			dma_resv_add_shared_fence(obj->resv,
						  submit->out_fence);

		submit_unlock_object(submit, i);
	}
}
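
/*
 * Pin all BOs into the submit's MMU context. For softpin submits the
 * resulting IOVA must match the address userspace asked for exactly.
 */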
static int submit_pin_objects(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		struct etnaviv_vram_mapping *mapping;

		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
						  submit->mmu_context,
						  submit->bos[i].va);
		if (IS_ERR(mapping)) {
			ret = PTR_ERR(mapping);
			break;
		}

		if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
		    submit->bos[i].va != mapping->iova) {
			etnaviv_gem_mapping_unreference(mapping);
			return -EINVAL;
		}

		atomic_inc(&etnaviv_obj->gpu_active);

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].mapping = mapping;
	}

	return ret;
}
static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
		struct etnaviv_gem_submit_bo **bo)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
			  idx, submit->nr_bos);
		return -EINVAL;
	}

	*bo = &submit->bos[idx];

	return 0;
}
/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
		u32 nr_relocs)
{
	u32 i, last_offset = 0;
	u32 *ptr = stream;
	int ret;

	/* Submits using softpin don't blend with relocs */
	if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
		return -EINVAL;

	for (i = 0; i < nr_relocs; i++) {
		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
		struct etnaviv_gem_submit_bo *bo;
		u32 off;

		if (unlikely(r->flags)) {
			DRM_ERROR("invalid reloc flags\n");
			return -EINVAL;
		}

		if (r->submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
				  r->submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = r->submit_offset / 4;

		if (off >= size || off < last_offset) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, r->reloc_idx, &bo);
		if (ret)
			return ret;

		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object\n", i);
			return -EINVAL;
		}

		ptr[off] = bo->mapping->iova + r->reloc_offset;

		last_offset = off;
	}

	return 0;
}
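
/*
 * Validate the perfmon requests against the submit's BO list and copy them
 * into the kernel-side pmr array, including a kernel mapping of the BO the
 * counter results get written to.
 */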
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
		u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
{
	u32 i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
		struct etnaviv_gem_submit_bo *bo;
		int ret;

		ret = submit_bo(submit, r->read_idx, &bo);
		if (ret)
			return ret;

		/* a sequence number gets stored at offset 0, used for userspace sync */
		if (r->read_offset == 0) {
			DRM_ERROR("perfmon request: offset is 0");
			return -EINVAL;
		}

		if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
			DRM_ERROR("perfmon request %u: offset outside object", i);
			return -EINVAL;
		}

		if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
			DRM_ERROR("perfmon request: flags are not valid");
			return -EINVAL;
		}

		if (etnaviv_pm_req_validate(r, exec_state)) {
			DRM_ERROR("perfmon request: domain or signal not valid");
			return -EINVAL;
		}

		submit->pmrs[i].flags = r->flags;
		submit->pmrs[i].domain = r->domain;
		submit->pmrs[i].signal = r->signal;
		submit->pmrs[i].sequence = r->sequence;
		submit->pmrs[i].offset = r->read_offset;
		submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
	}

	return 0;
}
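
/*
 * Final teardown once the last reference on the submit is dropped: unpin
 * and unlock all BOs, release the cmdbuf, MMU contexts and fences, and
 * remove the out-fence from the GPU's fence IDR so it can no longer be
 * looked up.
 */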
static void submit_cleanup(struct kref *kref)
{
	struct etnaviv_gem_submit *submit =
			container_of(kref, struct etnaviv_gem_submit, refcount);
	unsigned i;

	if (submit->runtime_resumed)
		pm_runtime_put_autosuspend(submit->gpu->dev);

	if (submit->cmdbuf.suballoc)
		etnaviv_cmdbuf_free(&submit->cmdbuf);

	if (submit->mmu_context)
		etnaviv_iommu_context_put(submit->mmu_context);

	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* unpin all objects */
		if (submit->bos[i].flags & BO_PINNED) {
			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
			atomic_dec(&etnaviv_obj->gpu_active);
			submit->bos[i].mapping = NULL;
			submit->bos[i].flags &= ~BO_PINNED;
		}

		/* if the GPU submit failed, objects might still be locked */
		submit_unlock_object(submit, i);
		drm_gem_object_put(&etnaviv_obj->base);
	}

	wake_up_all(&submit->gpu->fence_event);

	if (submit->in_fence)
		dma_fence_put(submit->in_fence);
	if (submit->out_fence) {
		/* first remove from IDR, so fence can not be found anymore */
		mutex_lock(&submit->gpu->fence_lock);
		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
		mutex_unlock(&submit->gpu->fence_lock);
		dma_fence_put(submit->out_fence);
	}
	kfree(submit->pmrs);
	kfree(submit);
}

void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
	kref_put(&submit->refcount, submit_cleanup);
}
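
/*
 * Main GEM_SUBMIT ioctl entry point: validate the arguments, copy the BO,
 * reloc, perfmon and cmdstream arrays from userspace in one go, build up
 * the submit object and hand it to the scheduler. On success the submit's
 * out-fence is returned to userspace as a fence id and, if requested, as a
 * sync_file fd.
 */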
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_file_private *ctx = file->driver_priv;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_pmr *pmrs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_gpu *gpu;
	struct sync_file *sync_file = NULL;
	struct ww_acquire_ctx ticket;
	int out_fence_fd = -1;
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
	    priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
		DRM_ERROR("softpin requested on incompatible MMU\n");
		return -EINVAL;
	}

	if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
	    args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
		DRM_ERROR("submit arguments out of size limits\n");
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
	if (!bos || !relocs || !pmrs || !stream) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
			     args->nr_pmrs * sizeof(*pmrs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	ww_acquire_init(&ticket, &reservation_ww_class);

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_ww_acquire;
	}

	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_objects;

	submit->ctx = file->driver_priv;
	submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_objects;

	if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
	    !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_objects;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		submit->in_fence = sync_file_get_fence(args->fence_fd);
		if (!submit->in_fence) {
			ret = -EINVAL;
			goto err_submit_objects;
		}
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_objects;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_objects;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_objects;

	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_objects;

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_objects;

	ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
	if (ret)
		goto err_submit_objects;

	submit_attach_object_fences(submit);

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto err_submit_objects;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence_id;

err_submit_objects:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
	ww_acquire_fini(&ticket);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	/* kvfree() tolerates NULL, so the pointers need no individual checks */
	kvfree(stream);
	kvfree(bos);
	kvfree(relocs);
	kvfree(pmrs);

	return ret;
}