// SPDX-License-Identifier: GPL-2.0
/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>	/* for THIS_MODULE */

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif
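
/*
 * Tracks a single dma-buf that is either exported from or imported into
 * gntdev; the union is discriminated by which of the two code paths
 * created the object.
 */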
struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);
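
/*
 * Allocate a wait object and add it to the wait list. The caller's
 * reference to gntdev_dmabuf is dropped here, so that the buffer's
 * release (dmabuf_exp_release) can fire and signal the completion.
 */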
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if it is not found, then either the
	 * buffer has already been released or the file descriptor provided
	 * is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate wait object and add it to the wait list,
	 * so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support. */
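
/* Wrap an array of pages into a freshly allocated scatter-gather table. */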
static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
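
/*
 * Per-attachment state lives in attach->priv: the sg table and its DMA
 * mapping are created lazily in dmabuf_exp_ops_map_dma_buf() and torn
 * down in dmabuf_exp_ops_detach().
 */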
static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}

	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}
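
/*
 * kref release callback: wake any waiters, unlink the buffer from the
 * export list and drop the reference on the gntdev file. Both call
 * sites hold priv->lock around kref_put(), which serializes the list
 * manipulation here.
 */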
static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};
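
/*
 * Export the already mapped pages as a new dma-buf, install a file
 * descriptor for it and link the buffer into the export list. The gntdev
 * file is pinned with get_file() so the device context outlives the
 * exported buffer.
 */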
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}
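
/*
 * Map the foreign grant references into local pages and export those
 * pages via dmabuf_exp_from_pages(). On failure the grant map is
 * removed again via dmabuf_exp_remove_map().
 */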
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */
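
/*
 * Grant the foreign domain @domid access to each backing page of the
 * imported buffer, storing the resulting grant references in @refs.
 * On a claim failure the unclaimed references are returned to the pool;
 * ending access on the already granted ones is left to the caller.
 */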
static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n",
				 ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);

	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != GRANT_INVALID_REF)
			gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}
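
/*
 * Import the dma-buf given by @fd: attach to it and map it into the
 * device's DMA address space, verify that the mapping starts at offset 0
 * and covers exactly @count pages backed by struct page, then grant
 * @domid access to those pages.
 */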
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sgtable_page(sgt, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if page is valid: this can happen if we are given
		 * a page from VRAM or other resources which are not backed
		 * by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the imported dma-buf by its file descriptor and remove
 * it from the buffer's list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}
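
/*
 * Tear an imported buffer down in reverse order of import: end foreign
 * access, unmap the attachment, detach and finally drop the reference
 * taken on the dma-buf at import time.
 */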
static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */
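
/*
 * A minimal user-space sketch of the export path (an illustration only,
 * assuming the uapi definitions from <xen/gntdev.h>, that refs[] is the
 * trailing variable-length member of the ioctl structure, and that
 * count/domid and the grant references in refs[] were obtained
 * elsewhere; error handling omitted):
 *
 *	int gnt = open("/dev/xen/gntdev", O_RDWR);
 *	size_t sz = sizeof(struct ioctl_gntdev_dmabuf_exp_from_refs) +
 *		    (count - 1) * sizeof(__u32);
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op = calloc(1, sz);
 *
 *	op->flags = 0;
 *	op->count = count;
 *	op->domid = domid;
 *	memcpy(op->refs, refs, count * sizeof(__u32));
 *	ioctl(gnt, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op);
 *
 * On success op->fd holds the file descriptor of the exported dma-buf.
 */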

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}
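
/*
 * Allocate the per-file dma-buf context. @filp is the gntdev file this
 * context belongs to; it is only referenced (via get_file()) while
 * exported buffers exist.
 */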
struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}