/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);

        vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
                                          GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        dma_resv_init(&obj->_resv);
        if (!obj->resv)
                obj->resv = &obj->_resv;

        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
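
/*
 * Usage sketch (editor's illustration, not part of the original file; all
 * "foo_*" names are hypothetical): drivers normally embed struct
 * drm_gem_object in their own buffer structure and initialize it with one of
 * the two helpers above, roughly like this:
 *
 *      struct foo_bo {
 *              struct drm_gem_object base;
 *      };
 *
 *      static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *      {
 *              struct foo_bo *bo;
 *              int ret;
 *
 *              bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *              if (!bo)
 *                      return ERR_PTR(-ENOMEM);
 *
 *              bo->base.funcs = &foo_gem_funcs;
 *              ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *              if (ret) {
 *                      kfree(bo);
 *                      return ERR_PTR(ret);
 *              }
 *              return bo;
 *      }
 *
 * Objects without shmem backing would call drm_gem_private_object_init()
 * instead and provide their own backing storage.
 */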

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
        /*
         * Note: obj->dma_buf can't disappear as long as we still hold a
         * handle reference in obj->handle_count.
         */
        mutex_lock(&filp->prime.lock);
        if (obj->dma_buf) {
                drm_prime_remove_buf_handle_locked(&filp->prime,
                                                   obj->dma_buf);
        }
        mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name.
         */
        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        if (obj->funcs && obj->funcs->close)
                obj->funcs->close(obj, file_priv);
        else if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        /* Don't allow imported objects to be mapped */
        if (obj->import_attach) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
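
/*
 * Illustrative wiring (editor's sketch; "foo_*" names are hypothetical): the
 * dumb-buffer helpers above are meant to be plugged directly into
 * &struct drm_driver, leaving only the create step to the driver:
 *
 *      static struct drm_driver foo_driver = {
 *              .driver_features = DRIVER_GEM | DRIVER_MODESET,
 *              .dumb_create = foo_dumb_create,
 *              .dumb_map_offset = drm_gem_dumb_map_offset,
 *              .dumb_destroy = drm_gem_dumb_destroy,
 *      };
 */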

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr. Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (obj->funcs && obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
        } else if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
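
/*
 * Usage sketch (editor's illustration; "foo_*" names are hypothetical): handle
 * creation is usually the last step of a buffer-create ioctl, after which the
 * creation path drops its own reference because the handle now holds one:
 *
 *      static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
 *                                 struct drm_mode_create_dumb *args)
 *      {
 *              struct foo_bo *bo;
 *              int ret;
 *
 *              args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *              args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *              bo = foo_bo_create(dev, args->size);
 *              if (IS_ERR(bo))
 *                      return PTR_ERR(bo);
 *
 *              ret = drm_gem_handle_create(file, &bo->base, &args->handle);
 *              drm_gem_object_put(&bo->base);
 *              return ret;
 *      }
 */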

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);
        __pagevec_release(pvec);
        cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        struct pagevec pvec;
        int i, npages;

        if (WARN_ON(!obj->filp))
                return ERR_PTR(-EINVAL);

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        mapping_set_unevictable(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        mapping_clear_unevictable(mapping);
        pagevec_init(&pvec);
        while (i--) {
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;
        struct address_space *mapping;
        struct pagevec pvec;

        mapping = file_inode(obj->filp)->i_mapping;
        mapping_clear_unevictable(mapping);

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pagevec_init(&pvec);
        for (i = 0; i < npages; i++) {
                if (!pages[i])
                        continue;

                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
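
/*
 * Usage sketch (editor's illustration): a shmem-backed driver typically pins
 * the whole object before building an sg-table or CPU mapping, and unpins it
 * afterwards, marking the pages dirty and accessed:
 *
 *      struct page **pages = drm_gem_get_pages(obj);
 *
 *      if (IS_ERR(pages))
 *              return PTR_ERR(pages);
 *      ... access or map the pages ...
 *      drm_gem_put_pages(obj, pages, true, true);
 */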

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
                          struct drm_gem_object **objs)
{
        int i, ret = 0;
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        for (i = 0; i < count; i++) {
                /* Check if we currently have a reference on the object */
                obj = idr_find(&filp->object_idr, handle[i]);
                if (!obj) {
                        ret = -ENOENT;
                        break;
                }
                drm_gem_object_get(obj);
                objs[i] = obj;
        }
        spin_unlock(&filp->table_lock);

        return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file-private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out)
{
        int ret;
        u32 *handles;
        struct drm_gem_object **objs;

        if (!count)
                return 0;

        objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
                              GFP_KERNEL | __GFP_ZERO);
        if (!objs)
                return -ENOMEM;

        *objs_out = objs;

        handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                goto out;
        }

        if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in GEM handles\n");
                goto out;
        }

        ret = objects_lookup(filp, handles, count, objs);
out:
        kvfree(handles);
        return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
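
/*
 * Usage sketch (editor's illustration; the "args" and "job" fields are
 * hypothetical): an execbuf-style ioctl can translate the userspace handle
 * array into object pointers in one call:
 *
 *      ret = drm_gem_objects_lookup(file_priv,
 *                                   u64_to_user_ptr(args->bo_handles),
 *                                   args->bo_count, &job->bos);
 *      if (ret)
 *              return ret;
 *      ... use job->bos[], later dropping each object with
 *          drm_gem_object_put() and freeing the array with kvfree() ...
 */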

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file-private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj = NULL;

        objects_lookup(filp, &handle, 1, &obj);
        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on a GEM object's reservation object's
 * shared and/or exclusive fences.
 * @filep: DRM file-private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
                           bool wait_all, unsigned long timeout)
{
        long ret;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(filep, handle);
        if (!obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
                return -EINVAL;
        }

        ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
                                        true, timeout);
        if (ret == 0)
                ret = -ETIME;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
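
/*
 * Usage sketch (editor's illustration; the ioctl struct and "foo_*" names are
 * hypothetical): a driver's BO-wait ioctl can be a thin wrapper around this
 * helper:
 *
 *      static int foo_wait_bo_ioctl(struct drm_device *dev, void *data,
 *                                   struct drm_file *file_priv)
 *      {
 *              struct drm_foo_wait_bo *args = data;
 *              unsigned long timeout;
 *
 *              timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *              return drm_gem_dma_resv_wait(file_priv, args->handle,
 *                                           true, timeout);
 *      }
 */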

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put(obj);
        return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;
        args->size = obj->size;

err:
        drm_gem_object_put(obj);
        return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init_base(&file_private->object_idr, 1);
        spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
                fput(obj->filp);

        dma_resv_fini(&obj->_resv);
        drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;

        if (obj->funcs)
                obj->funcs->free(obj);
        else if (dev->driver->gem_free_object_unlocked)
                dev->driver->gem_free_object_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_locked - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put() instead.
 */
void
drm_gem_object_put_locked(struct drm_gem_object *obj)
{
        if (obj) {
                WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put_locked);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;
        int ret;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_get(obj);

        vma->vm_private_data = obj;

        if (obj->funcs && obj->funcs->mmap) {
                ret = obj->funcs->mmap(obj, vma);
                if (ret) {
                        drm_gem_object_put(obj);
                        return ret;
                }
                WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
        } else {
                if (obj->funcs && obj->funcs->vm_ops)
                        vma->vm_ops = obj->funcs->vm_ops;
                else if (dev->driver->gem_vm_ops)
                        vma->vm_ops = dev->driver->gem_vm_ops;
                else {
                        drm_gem_object_put(obj);
                        return -EINVAL;
                }

                vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        }

        return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put(obj);
                return -EACCES;
        }

        if (node->readonly) {
                if (vma->vm_flags & VM_WRITE) {
                        drm_gem_object_put(obj);
                        return -EINVAL;
                }

                vma->vm_flags &= ~VM_MAYWRITE;
        }

        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);

        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
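
/*
 * Usage sketch (editor's illustration; "foo_*" names are hypothetical): most
 * GEM drivers route the DRM file descriptor's mmap straight here, commonly via
 * the DEFINE_DRM_GEM_FOPS() helper from <drm/drm_gem.h>:
 *
 *      DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *      static struct drm_driver foo_driver = {
 *              ...
 *              .fops = &foo_fops,
 *      };
 */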

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
                        const struct drm_gem_object *obj)
{
        drm_printf_indent(p, indent, "name=%d\n", obj->name);
        drm_printf_indent(p, indent, "refcount=%u\n",
                          kref_read(&obj->refcount));
        drm_printf_indent(p, indent, "start=%08lx\n",
                          drm_vma_node_start(&obj->vma_node));
        drm_printf_indent(p, indent, "size=%zu\n", obj->size);
        drm_printf_indent(p, indent, "imported=%s\n",
                          obj->import_attach ? "yes" : "no");

        if (obj->funcs && obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
        if (obj->funcs && obj->funcs->pin)
                return obj->funcs->pin(obj);
        else if (obj->dev->driver->gem_prime_pin)
                return obj->dev->driver->gem_prime_pin(obj);
        else
                return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
        if (obj->funcs && obj->funcs->unpin)
                obj->funcs->unpin(obj);
        else if (obj->dev->driver->gem_prime_unpin)
                obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
        void *vaddr;

        if (obj->funcs && obj->funcs->vmap)
                vaddr = obj->funcs->vmap(obj);
        else if (obj->dev->driver->gem_prime_vmap)
                vaddr = obj->dev->driver->gem_prime_vmap(obj);
        else
                vaddr = ERR_PTR(-EOPNOTSUPP);

        if (!vaddr)
                vaddr = ERR_PTR(-ENOMEM);

        return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        if (!vaddr)
                return;

        if (obj->funcs && obj->funcs->vunmap)
                obj->funcs->vunmap(obj, vaddr);
        else if (obj->dev->driver->gem_prime_vunmap)
                obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
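
/*
 * Usage sketch (editor's illustration): kernel-internal users pair these two
 * helpers for a temporary CPU mapping of a buffer object, e.g. for a one-off
 * copy:
 *
 *      void *vaddr = drm_gem_vmap(obj);
 *
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *      memcpy(vaddr, src, len);
 *      drm_gem_vunmap(obj, vaddr);
 */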

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
                          struct ww_acquire_ctx *acquire_ctx)
{
        int contended = -1;
        int i, ret;

        ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
        if (contended != -1) {
                struct drm_gem_object *obj = objs[contended];

                ret = dma_resv_lock_slow_interruptible(obj->resv,
                                                       acquire_ctx);
                if (ret) {
                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        for (i = 0; i < count; i++) {
                if (i == contended)
                        continue;

                ret = dma_resv_lock_interruptible(objs[i]->resv,
                                                  acquire_ctx);
                if (ret) {
                        int j;

                        for (j = 0; j < i; j++)
                                dma_resv_unlock(objs[j]->resv);

                        if (contended != -1 && contended >= i)
                                dma_resv_unlock(objs[contended]->resv);

                        if (ret == -EDEADLK) {
                                contended = i;
                                goto retry;
                        }

                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        ww_acquire_done(acquire_ctx);

        return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
                            struct ww_acquire_ctx *acquire_ctx)
{
        int i;

        for (i = 0; i < count; i++)
                dma_resv_unlock(objs[i]->resv);

        ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
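
/*
 * Usage sketch (editor's illustration; the "job" structure is hypothetical): a
 * job-submission path locks every buffer of the job in one call, publishes its
 * fences while holding the locks, then unlocks:
 *
 *      struct ww_acquire_ctx ctx;
 *      int ret;
 *
 *      ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
 *      if (ret)
 *              return ret;
 *      ... add the job's fences to the objects' reservation objects ...
 *      drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
 */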

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
                            struct dma_fence *fence)
{
        struct dma_fence *entry;
        unsigned long index;
        u32 id = 0;
        int ret;

        if (!fence)
                return 0;

        /* Deduplicate if we already depend on a fence from the same context.
         * This lets the size of the array of deps scale with the number of
         * engines involved, rather than the number of BOs.
         */
        xa_for_each(fence_array, index, entry) {
                if (entry->context != fence->context)
                        continue;

                if (dma_fence_is_later(fence, entry)) {
                        dma_fence_put(entry);
                        xa_store(fence_array, index, fence, GFP_KERNEL);
                } else {
                        dma_fence_put(fence);
                }
                return 0;
        }

        ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
        if (ret != 0)
                dma_fence_put(fence);

        return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
                                     struct drm_gem_object *obj,
                                     bool write)
{
        int ret;
        struct dma_fence **fences;
        unsigned int i, fence_count;

        if (!write) {
                struct dma_fence *fence =
                        dma_resv_get_excl_rcu(obj->resv);

                return drm_gem_fence_array_add(fence_array, fence);
        }

        ret = dma_resv_get_fences_rcu(obj->resv, NULL,
                                      &fence_count, &fences);
        if (ret || !fence_count)
                return ret;

        for (i = 0; i < fence_count; i++) {
                ret = drm_gem_fence_array_add(fence_array, fences[i]);
                if (ret)
                        break;
        }

        for (; i < fence_count; i++)
                dma_fence_put(fences[i]);
        kfree(fences);
        return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
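
/*
 * Usage sketch (editor's illustration; the "job" structure and FOO_BO_WRITE
 * flag are hypothetical): before handing a job to the scheduler, a driver can
 * collect the implicit dependencies of every buffer it touches:
 *
 *      xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
 *
 *      for (i = 0; i < job->bo_count; i++) {
 *              ret = drm_gem_fence_array_add_implicit(&job->deps, job->bos[i],
 *                                                     job->flags[i] & FOO_BO_WRITE);
 *              if (ret)
 *                      goto err_free_deps;
 *      }
 */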