drm_syncobj.c

/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */
/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
 * container for a synchronization primitive which can be used by userspace
 * to explicitly synchronize GPU commands, can be shared between userspace
 * processes, and can be shared between different DRM drivers.
 * Their primary use-case is to implement Vulkan fences and semaphores.
 * The syncobj userspace API provides ioctls for several operations:
 *
 * - Creation and destruction of syncobjs
 * - Import and export of syncobjs to/from a syncobj file descriptor
 * - Import and export a syncobj's underlying fence to/from a sync file
 * - Reset a syncobj (set its fence to NULL)
 * - Signal a syncobj (set a trivially signaled fence)
 * - Wait for a syncobj's fence to appear and be signaled
 *
 * The syncobj userspace API also provides operations to manipulate a syncobj
 * in terms of a timeline of struct &dma_fence_chain rather than a single
 * struct &dma_fence, through the following operations:
 *
 * - Signal a given point on the timeline
 * - Wait for a given point to appear and/or be signaled
 * - Import and export from/to a given point of a timeline
 *
 * At its core, a syncobj is simply a wrapper around a pointer to a struct
 * &dma_fence which may be NULL.
 * When a syncobj is first created, its pointer is either NULL or a pointer
 * to an already signaled fence depending on whether the
 * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
 * &DRM_IOCTL_SYNCOBJ_CREATE.
 *
 * If the syncobj is considered as a binary (its state is either signaled or
 * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
 * the syncobj, the syncobj's fence is replaced with a fence which will be
 * signaled by the completion of that work.
 * If the syncobj is considered as a timeline primitive, when GPU work is
 * enqueued in a DRM driver to signal a given point of the syncobj, a new
 * struct &dma_fence_chain is created which points to the DRM driver's fence
 * and also to the previous fence that was in the syncobj. The new struct
 * &dma_fence_chain fence replaces the syncobj's fence and will be signaled by
 * completion of the DRM driver's work and also any work associated with the
 * fence previously in the syncobj.
 *
 * When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
 * time the work is enqueued, it waits on the syncobj's fence before
 * submitting the work to hardware. That fence is either:
 *
 * - The syncobj's current fence if the syncobj is considered as a binary
 *   primitive.
 * - The struct &dma_fence associated with a given point if the syncobj is
 *   considered as a timeline primitive.
 *
 * If the syncobj's fence is NULL or not present in the syncobj's timeline,
 * the enqueue operation is expected to fail.
 *
 * With a binary syncobj, all manipulation of the syncobj's fence happens in
 * terms of the current fence at the time the ioctl is called by userspace
 * regardless of whether that operation is an immediate host-side operation
 * (signal or reset) or an operation which is enqueued in some driver
 * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
 * to manipulate a syncobj from the host by resetting its pointer to NULL or
 * setting its pointer to a fence which is already signaled.
 *
 * With a timeline syncobj, all manipulation of the syncobj's fence happens in
 * terms of a u64 value referring to a point in the timeline. See
 * dma_fence_chain_find_seqno() to see how a given point is found in the
 * timeline.
 *
 * Note that applications should be careful to always use the timeline set of
 * ioctl() calls when dealing with a syncobj considered as a timeline. Using
 * the binary set of ioctl() calls with a syncobj considered as a timeline
 * could result in incorrect synchronization. The use of a binary syncobj is
 * supported through the timeline set of ioctl() calls by using a point value
 * of 0, which reproduces the behavior of the binary set of ioctl() calls
 * (for example, replace the syncobj's fence when signaling).
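 *
 * As an illustration, a minimal userspace sequence for a binary syncobj
 * might look as follows. This is only a sketch, assuming an already-open
 * DRM device fd in drm_fd and libdrm's drmSyncobj*() wrappers around the
 * ioctls described above::
 *
 *	uint32_t handle;
 *
 *	drmSyncobjCreate(drm_fd, 0, &handle);	// DRM_IOCTL_SYNCOBJ_CREATE
 *	drmSyncobjSignal(drm_fd, &handle, 1);	// host-side signal
 *	drmSyncobjDestroy(drm_fd, handle);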
 *
 *
 * Host-side wait on syncobjs
 * --------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
 * host-side wait on all of the syncobj fences simultaneously.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
 * all of the syncobj fences to be signaled before it returns.
 * Otherwise, it returns once at least one syncobj fence has been signaled
 * and the index of a signaled fence is written back to the client.
 *
 * Unlike the enqueued GPU work dependencies which fail if they see a NULL
 * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
 * the host-side wait will first wait for the syncobj to receive a non-NULL
 * fence and then wait on that fence.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
 * syncobjs in the array has a NULL fence, -EINVAL will be returned.
 * Assuming the syncobj starts off with a NULL fence, this allows a client
 * to do a host wait in one thread (or process) which waits on GPU work
 * submitted in another thread (or process) without having to manually
 * synchronize between the two.
 * This requirement is inherited from the Vulkan fence API.
 *
 * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
 * handles as well as an array of u64 points and does a host-side wait on all
 * of the syncobj fences at the given points simultaneously.
 *
 * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
 * fence to materialize on the timeline without waiting for the fence to be
 * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
 * requirement is inherited from the wait-before-signal behavior required by
 * the Vulkan timeline semaphore API.
 *
 * Import/export of syncobjs
 * -------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
 * provide two mechanisms for import/export of syncobjs.
 *
 * The first lets the client import or export an entire syncobj to a file
 * descriptor.
 * These fd's are opaque and have no other use case, except passing the
 * syncobj between processes.
 * All exported file descriptors and any syncobj handles created as a
 * result of importing those file descriptors own a reference to the
 * same underlying struct &drm_syncobj and the syncobj can be used
 * persistently across all the processes with which it is shared.
 * The syncobj is freed only once the last reference is dropped.
 * Unlike dma-buf, importing a syncobj creates a new handle (with its own
 * reference) for every import instead of de-duplicating.
 * The primary use-case of this persistent import/export is for shared
 * Vulkan fences and semaphores.
 *
 * The second import/export mechanism, which is indicated by
 * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
 * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client
 * import/export the syncobj's current fence from/to a &sync_file.
 * When a syncobj is exported to a sync file, that sync file wraps the
 * syncobj's fence at the time of export and any later signal or reset
 * operations on the syncobj will not affect the exported sync file.
 * When a sync file is imported into a syncobj, the syncobj's fence is set
 * to the fence wrapped by that sync file.
 * Because sync files are immutable, resetting or signaling the syncobj
 * will not affect any sync files whose fences have been imported into the
 * syncobj.
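 *
 * A sketch of both mechanisms, again assuming libdrm's wrapper functions
 * and an open DRM device fd in drm_fd::
 *
 *	int obj_fd, sync_file_fd;
 *	uint32_t imported;
 *
 *	drmSyncobjHandleToFD(drm_fd, handle, &obj_fd);		// persistent export
 *	drmSyncobjFDToHandle(drm_fd, obj_fd, &imported);	// same underlying syncobj
 *	drmSyncobjExportSyncFile(drm_fd, handle, &sync_file_fd); // snapshot of current fence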
 *
 *
 * Import/export of timeline points in timeline syncobjs
 * -----------------------------------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer the struct
 * &dma_fence_chain at a given u64 point of one syncobj to another u64 point
 * of another syncobj.
 *
 * Note that if you want to transfer a struct &dma_fence_chain at a given
 * point on a timeline syncobj from/into a binary syncobj, you can use the
 * point 0 to mean take/replace the fence in the syncobj.
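 *
 * For example, copying the fence at point 3 of a timeline syncobj into a
 * binary syncobj might look as follows (a sketch assuming libdrm's
 * drmSyncobjTransfer() wrapper)::
 *
 *	drmSyncobjTransfer(drm_fd, binary_handle, 0,	// dst handle, dst point
 *			   timeline_handle, 3, 0);	// src handle, src point, flags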
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "drm_internal.h"

struct syncobj_wait_entry {
	struct list_head node;
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	u64    point;
};

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait);

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
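 *
 * A minimal usage sketch (hypothetical caller code)::
 *
 *	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
 *
 *	if (!syncobj)
 *		return -ENOENT;
 *	// ... use the syncobj ...
 *	drm_syncobj_put(syncobj);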
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		/* No fence yet, or the requested point is not in the chain:
		 * wait for it via the syncobj callback list.
		 */
		dma_fence_put(fence);
		list_add_tail(&wait->node, &syncobj->cb_list);
	} else if (!fence) {
		/* dma_fence_chain_find_seqno() cleared the fence because the
		 * point is already signaled: hand back a stub fence.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}
	spin_unlock(&syncobj->lock);
}

static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
				    struct syncobj_wait_entry *wait)
{
	if (!wait->node.next)
		return;

	spin_lock(&syncobj->lock);
	list_del_init(&wait->node);
	spin_unlock(&syncobj->lock);
}

/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add the timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as new timeline point to the syncobj.
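 *
 * A usage sketch, mirroring what drm_syncobj_timeline_signal_ioctl() below
 * does (the caller allocates the chain node, which is consumed by this
 * function)::
 *
 *	struct dma_fence_chain *chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 *
 *	if (!chain)
 *		return -ENOMEM;
 *	drm_syncobj_add_point(syncobj, chain, fence, point);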
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
			   struct dma_fence_chain *chain,
			   struct dma_fence *fence,
			   uint64_t point)
{
	struct syncobj_wait_entry *cur, *tmp;
	struct dma_fence *prev;

	dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	prev = drm_syncobj_fence_get(syncobj);
	/* Adding an unordered point to the timeline could cause the payload
	 * returned from query_ioctl to be 0!
	 */
	if (prev && prev->seqno >= point)
		DRM_DEBUG("You are adding an unordered point to the timeline!\n");
	dma_fence_chain_init(chain, prev, fence, point);
	rcu_assign_pointer(syncobj->fence, &chain->base);

	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
		syncobj_wait_syncobj_func(syncobj, cur);
	spin_unlock(&syncobj->lock);

	/* Walk the chain once to trigger garbage collection */
	dma_fence_chain_for_each(fence, prev);
	dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
			syncobj_wait_syncobj_func(syncobj, cur);
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
}

/* 5s default for wait submission */
#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
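 *
 * A usage sketch, as a driver might use at submission time (hypothetical
 * caller code)::
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_syncobj_find_fence(file_private, handle, point, 0, &fence);
 *	if (ret)
 *		return ret;
 *	// ... make the enqueued GPU work wait on the fence ...
 *	dma_fence_put(fence);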
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle, u64 point, u64 flags,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	struct syncobj_wait_entry wait;
	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
	int ret;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);
	if (*fence) {
		ret = dma_fence_chain_find_seqno(fence, point);
		if (!ret) {
			/* If the requested seqno is already signaled
			 * drm_syncobj_find_fence may return a NULL
			 * fence. To make sure the recipient gets
			 * signalled, use a new fence instead.
			 */
			if (!*fence)
				*fence = dma_fence_get_stub();
			goto out;
		}
		dma_fence_put(*fence);
	} else {
		ret = -EINVAL;
	}

	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		goto out;

	memset(&wait, 0, sizeof(wait));
	wait.task = current;
	wait.point = point;
	drm_syncobj_fence_add_wait(syncobj, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (wait.fence) {
			ret = 0;
			break;
		}
		if (timeout == 0) {
			ret = -ETIME;
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

	__set_current_state(TASK_RUNNING);
	*fence = wait.fence;

	if (wait.node.next)
		drm_syncobj_remove_wait(syncobj, &wait);

out:
	drm_syncobj_put(syncobj);

	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
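 *
 * A sketch of the typical create-and-expose sequence, essentially what
 * drm_syncobj_create_as_handle() below does::
 *
 *	struct drm_syncobj *syncobj;
 *	u32 handle;
 *	int ret;
 *
 *	ret = drm_syncobj_create(&syncobj, 0, NULL);
 *	if (ret)
 *		return ret;
 *	ret = drm_syncobj_get_handle(file_private, syncobj, &handle);
 *	drm_syncobj_put(syncobj);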
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
		drm_syncobj_assign_null_handle(syncobj);

	if (fence)
		drm_syncobj_replace_fence(syncobj, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};

/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct fd f = fdget(fd);
	int ret;

	if (!f.file)
		return -EINVAL;

	if (f.file->f_op != &drm_syncobj_file_fops) {
		fdput(f);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = f.file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fdput(f);
	return ret;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* no valid flags yet */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}

static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
					    struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *timeline_syncobj = NULL;
	struct dma_fence *fence;
	struct dma_fence_chain *chain;
	int ret;

	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!timeline_syncobj) {
		return -ENOENT;
	}
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags,
				     &fence);
	if (ret)
		goto err;
	chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
	if (!chain) {
		ret = -ENOMEM;
		goto err1;
	}
	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err1:
	dma_fence_put(fence);
err:
	drm_syncobj_put(timeline_syncobj);

	return ret;
}

static int
drm_syncobj_transfer_to_binary(struct drm_file *file_private,
			       struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *binary_syncobj = NULL;
	struct dma_fence *fence;
	int ret;

	binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!binary_syncobj)
		return -ENOENT;
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags, &fence);
	if (ret)
		goto err;
	drm_syncobj_replace_fence(binary_syncobj, fence);
	dma_fence_put(fence);
err:
	drm_syncobj_put(binary_syncobj);

	return ret;
}

int
drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_private)
{
	struct drm_syncobj_transfer *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->dst_point)
		ret = drm_syncobj_transfer_to_timeline(file_private, args);
	else
		ret = drm_syncobj_transfer_to_binary(file_private, args);

	return ret;
}

static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* This happens inside the syncobj lock */
	fence = rcu_dereference_protected(syncobj->fence,
					  lockdep_is_held(&syncobj->lock));
	dma_fence_get(fence);
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		dma_fence_put(fence);
		return;
	} else if (!fence) {
		/* The point is already signaled: find_seqno() cleared the
		 * fence, so hand back a stub fence.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}

	wake_up_process(wait->task);
	list_del_init(&wait->node);
}

static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  void __user *user_points,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint64_t *points;
	uint32_t signaled_count, i;

	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
	if (points == NULL)
		return -ENOMEM;

	if (!user_points) {
		memset(points, 0, count * sizeof(uint64_t));
	} else if (copy_from_user(points, user_points,
				  sizeof(uint64_t) * count)) {
		timeout = -EFAULT;
		goto err_free_points;
	}

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		timeout = -ENOMEM;
		goto err_free_points;
	}
	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		struct dma_fence *fence;

		entries[i].task = current;
		entries[i].point = points[i];
		fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
			dma_fence_put(fence);
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (fence)
			entries[i].fence = fence;
		else
			entries[i].fence = dma_fence_get_stub();

		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
		    dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
			    dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

err_free_points:
	kfree(points);

	return timeout;
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: absolute timeout in ns, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
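 *
 * For example, the wait ioctls below convert the user-supplied absolute
 * timeout like this::
 *
 *	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);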
 */
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* a 0 timeout means poll - absolute 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}
EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);

static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj_timeline_wait *timeline_wait,
				  struct drm_syncobj **syncobjs, bool timeline)
{
	signed long timeout = 0;
	uint32_t first = ~0;

	if (!timeline) {
		timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
		timeout = drm_syncobj_array_wait_timeout(syncobjs,
							 NULL,
							 wait->count_handles,
							 wait->flags,
							 timeout, &first);
		if (timeout < 0)
			return timeout;
		wait->first_signaled = first;
	} else {
		timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
		timeout = drm_syncobj_array_wait_timeout(syncobjs,
							 u64_to_user_ptr(timeline_wait->points),
							 timeline_wait->count_handles,
							 timeline_wait->flags,
							 timeout, &first);
		if (timeout < 0)
			return timeout;
		timeline_wait->first_signaled = first;
	}
	return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, NULL, syncobjs, false);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_private)
{
	struct drm_syncobj_timeline_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     NULL, args, syncobjs, true);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_assign_null_handle(syncobjs[i]);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	struct dma_fence_chain **chains;
	uint64_t *points;
	uint32_t i, j;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	points = kmalloc_array(args->count_handles, sizeof(*points),
			       GFP_KERNEL);
	if (!points) {
		ret = -ENOMEM;
		goto out;
	}
	if (!u64_to_user_ptr(args->points)) {
		memset(points, 0, args->count_handles * sizeof(uint64_t));
	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
				  sizeof(uint64_t) * args->count_handles)) {
		ret = -EFAULT;
		goto err_points;
	}

	chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
	if (!chains) {
		ret = -ENOMEM;
		goto err_points;
	}
	for (i = 0; i < args->count_handles; i++) {
		chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
		if (!chains[i]) {
			for (j = 0; j < i; j++)
				kfree(chains[j]);
			ret = -ENOMEM;
			goto err_chains;
		}
	}

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence *fence = dma_fence_get_stub();

		drm_syncobj_add_point(syncobjs[i], chains[i],
				      fence, points[i]);
		dma_fence_put(fence);
	}
err_chains:
	kfree(chains);
err_points:
	kfree(points);
out:
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	uint64_t __user *points = u64_to_user_ptr(args->points);
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence_chain *chain;
		struct dma_fence *fence;
		uint64_t point;

		fence = drm_syncobj_fence_get(syncobjs[i]);
		chain = to_dma_fence_chain(fence);
		if (chain) {
			struct dma_fence *iter, *last_signaled =
				dma_fence_get(fence);

			if (args->flags &
			    DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
				point = fence->seqno;
			} else {
				dma_fence_chain_for_each(iter, fence) {
					if (iter->context != fence->context) {
						dma_fence_put(iter);
						/* It is most likely that the
						 * timeline has unordered
						 * points.
						 */
						break;
					}
					dma_fence_put(last_signaled);
					last_signaled = dma_fence_get(iter);
				}
				point = dma_fence_is_signaled(last_signaled) ?
					last_signaled->seqno :
					to_dma_fence_chain(last_signaled)->prev_seqno;
			}
			dma_fence_put(last_signaled);
		} else {
			point = 0;
		}
		dma_fence_put(fence);
		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
		ret = ret ? -EFAULT : 0;
		if (ret)
			break;
	}
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}