drm_prime.c

/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle
 * operations. GEM based drivers must use drm_gem_prime_handle_to_fd() and
 * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
 * actual driver interface is provided through the &drm_gem_object_funcs.export
 * and &drm_driver.gem_prime_import hooks.
 *
 * &dma_buf_ops implementations for GEM drivers are all individually exported
 * for drivers which need to overwrite or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On the export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called. For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. An additional
 * complication is the pair of lookup caches for import and export. These are
 * required to guarantee that any given object will always have only one
 * unique userspace handle. This is required to allow userspace to detect
 * duplicated imports, since some GEM drivers do fail command submissions if a
 * given buffer object is listed more than once. These import and export
 * caches in &drm_prime_file_private only retain a weak reference, which is
 * cleaned up when the corresponding object is released.
 *
 * Self-importing: If userspace is using PRIME as a replacement for flink then
 * it will get a fd->handle request for a GEM object that it created. Drivers
 * should detect this situation and return back the underlying object from the
 * dma-buf private. For GEM based drivers this is handled in
 * drm_gem_prime_import() already.
 */
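
/*
 * Example (illustrative sketch, not part of this file): how a GEM driver
 * might wire up the PRIME entry points described above, assuming the generic
 * helpers below. All "foo_" names are hypothetical.
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_import	= drm_gem_prime_import,
 *		.gem_prime_mmap		= drm_gem_prime_mmap,
 *		...
 *	};
 *
 * The per-object export hook would then typically point at
 * drm_gem_prime_export():
 *
 *	static const struct drm_gem_object_funcs foo_gem_funcs = {
 *		...
 *		.export = drm_gem_prime_export,
 *		...
 *	};
 */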

struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
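
/*
 * Example (illustrative sketch, not part of this file): a driver-specific
 * &drm_gem_object_funcs.export hook that supplies its own &dma_buf_ops while
 * still relying on drm_gem_dmabuf_export() for correct reference counting.
 * "foo_dmabuf_ops" is hypothetical; its .release entry must be
 * drm_gem_dmabuf_release() so the references taken here are dropped again.
 *
 *	static struct dma_buf *foo_gem_prime_export(struct drm_gem_object *obj,
 *						    int flags)
 *	{
 *		struct dma_buf_export_info exp_info = {
 *			.exp_name = KBUILD_MODNAME,
 *			.owner = obj->dev->driver->fops->owner,
 *			.ops = &foo_dmabuf_ops,
 *			.size = obj->size,
 *			.flags = flags,
 *			.priv = obj,
 *			.resv = obj->resv,
 *		};
 *
 *		return drm_gem_dmabuf_export(obj->dev, &exp_info);
 *	}
 */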

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference the export fd holds */
	drm_gem_object_put(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else if (dev->driver->gem_prime_export)
		dmabuf = dev->driver->gem_prime_export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_driver.gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}
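
/*
 * Example (illustrative sketch, not part of this file): what these two ioctls
 * look like from userspace via the libdrm wrappers. Error handling is
 * omitted; the file descriptors and handles are hypothetical.
 *
 *	#include <xf86drm.h>
 *
 *	int dmabuf_fd;
 *	uint32_t imported_handle;
 *
 *	// export: GEM handle -> dma-buf fd (DRM_IOCTL_PRIME_HANDLE_TO_FD)
 *	drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC | DRM_RDWR, &dmabuf_fd);
 *
 *	// ... pass dmabuf_fd to another process over a UNIX domain socket ...
 *
 *	// import: dma-buf fd -> GEM handle (DRM_IOCTL_PRIME_FD_TO_HANDLE)
 *	drmPrimeFDToHandle(other_drm_fd, dmabuf_fd, &imported_handle);
 */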

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
 * &drm_gem_object_funcs.get_sg_table.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to pin
 * it indefinitely.
 *
 * FIXME: The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage, which is ok for scanout, but is not the best
 * option for sharing lots of buffers for rendering.
 */
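
/*
 * Example (illustrative sketch, not part of this file): because the helpers
 * below are exported individually, a driver can assemble its own
 * &dma_buf_ops and override just the callbacks it needs. Here a hypothetical
 * "foo" driver supplies a custom mmap implementation while reusing
 * everything else:
 *
 *	static const struct dma_buf_ops foo_dmabuf_ops = {
 *		.cache_sgt_mapping = true,
 *		.attach = drm_gem_map_attach,
 *		.detach = drm_gem_map_detach,
 *		.map_dma_buf = drm_gem_map_dma_buf,
 *		.unmap_dma_buf = drm_gem_unmap_dma_buf,
 *		.release = drm_gem_dmabuf_release,
 *		.mmap = foo_dmabuf_mmap,
 *		.vmap = drm_gem_dmabuf_vmap,
 *		.vunmap = drm_gem_dmabuf_vunmap,
 *	};
 */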

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: the &sg_table containing the scatterlist, or an ERR_PTR on error.
 * May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (obj->funcs)
		sgt = obj->funcs->get_sg_table(obj);
	else
		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	ret = dma_map_sgtable(attach->dev, sgt, dir,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(ret);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 *
 * Returns the kernel virtual address or NULL on failure.
 */
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	void *vaddr;

	vaddr = drm_gem_vmap(obj);
	if (IS_ERR(vaddr))
		vaddr = NULL;

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @vaddr: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
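
/*
 * Example (illustrative sketch, not part of this file): an importer that
 * needs temporary CPU access to a dma-buf backed by these helpers goes
 * through the core dma-buf API, which ends up in the vmap/vunmap callbacks
 * above:
 *
 *	void *vaddr = dma_buf_vmap(dma_buf);
 *
 *	if (vaddr) {
 *		// ... CPU access to the buffer contents ...
 *		dma_buf_vunmap(dma_buf, vaddr);
 *	}
 */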

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			return ret;
		vma->vm_private_data = obj;
		drm_gem_object_get(obj);
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
 * which should be set to drm_gem_prime_mmap().
 *
 * FIXME: There's really no point to this wrapper, drivers which need anything
 * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

/**
 * drm_gem_dmabuf_get_uuid - dma_buf get_uuid implementation for GEM
 * @dma_buf: buffer to query
 * @uuid: uuid outparam
 *
 * Queries the buffer's virtio UUID. This can be used as the
 * &dma_buf_ops.get_uuid callback. Calls into &drm_driver.gem_prime_get_uuid.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_get_uuid)
		return -ENODEV;

	return dev->driver->gem_prime_get_uuid(obj, uuid);
}
EXPORT_SYMBOL(drm_gem_dmabuf_get_uuid);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
	.get_uuid = drm_gem_dmabuf_get_uuid,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @dev: DRM device
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
				       struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg;
	struct scatterlist *sge;
	size_t max_segment = 0;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	if (dev)
		max_segment = dma_max_mapping_size(dev->dev);
	if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
		max_segment = SCATTERLIST_MAX_SEGMENT;
	sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
					  nr_pages << PAGE_SHIFT,
					  max_segment,
					  NULL, 0, GFP_KERNEL);
	if (IS_ERR(sge)) {
		kfree(sg);
		sg = ERR_CAST(sge);
	}
	return sg;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
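
/*
 * Example (illustrative sketch, not part of this file): a page-array-backed
 * driver implementing &drm_gem_object_funcs.get_sg_table with this helper.
 * "foo_gem_object" and its "pages"/"num_pages" fields are hypothetical.
 *
 *	static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *bo = to_foo_gem_object(obj);
 *
 *		return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->num_pages);
 *	}
 */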

/**
 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 * @sgt: sg_table describing the buffer to check
 *
 * This helper calculates the contiguous size in the DMA address space
 * of the buffer described by the provided sg_table.
 *
 * This is useful for implementing
 * &drm_driver.gem_prime_import_sg_table.
 */
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	struct scatterlist *sg;
	unsigned long size = 0;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		unsigned int len = sg_dma_len(sg);

		if (!len)
			break;
		if (sg_dma_address(sg) != expected)
			break;
		expected += len;
		size += len;
	}
	return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);
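
/*
 * Example (illustrative sketch, not part of this file): a driver that can
 * only handle DMA-contiguous buffers rejecting a scattered import in its
 * &drm_driver.gem_prime_import_sg_table hook:
 *
 *	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
 *		return ERR_PTR(-EINVAL);
 */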

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export function for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers who want to use a different device structure than &drm_device.dev for
 * attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
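
/*
 * Example (illustrative sketch, not part of this file): a driver whose DMA
 * master is a separate device (common for SoC display pipelines) attaching
 * with that device instead of &drm_device.dev. "foo_device" and its
 * "dma_dev" field are hypothetical.
 *
 *	static struct drm_gem_object *
 *	foo_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *	{
 *		struct foo_device *foo = to_foo_device(dev);
 *
 *		return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
 *	}
 */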

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import function for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: optional array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 *
 * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_entries)
{
	struct sg_dma_page_iter dma_iter;
	struct sg_page_iter page_iter;
	struct page **p = pages;
	dma_addr_t *a = addrs;

	if (pages) {
		for_each_sgtable_page(sgt, &page_iter, 0) {
			if (WARN_ON(p - pages >= max_entries))
				return -1;
			*p++ = sg_page_iter_page(&page_iter);
		}
	}
	if (addrs) {
		for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
			if (WARN_ON(a - addrs >= max_entries))
				return -1;
			*a++ = sg_page_iter_dma_address(&dma_iter);
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
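
/*
 * Example (illustrative sketch, not part of this file): an
 * &drm_driver.gem_prime_import_sg_table implementation filling a page array
 * for later fault handling. "foo_gem_object", "foo_gem_create()" and the
 * "pages" field are hypothetical; error handling is trimmed.
 *
 *	static struct drm_gem_object *
 *	foo_gem_import_sg_table(struct drm_device *dev,
 *				struct dma_buf_attachment *attach,
 *				struct sg_table *sgt)
 *	{
 *		unsigned int npages = attach->dmabuf->size >> PAGE_SHIFT;
 *		struct foo_gem_object *bo;
 *		int ret;
 *
 *		bo = foo_gem_create(dev, attach->dmabuf->size);
 *		ret = drm_prime_sg_to_page_addr_arrays(sgt, bo->pages,
 *						       NULL, npages);
 *		if (ret)
 *			return ERR_PTR(-ENOMEM);
 *
 *		return &bo->base;
 *	}
 */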

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
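
/*
 * Example (illustrative sketch, not part of this file): the matching
 * &drm_gem_object_funcs.free hook for a driver that imports with the helpers
 * above. "foo_gem_object" and its "sgt" field are hypothetical.
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *bo = to_foo_gem_object(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */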