etnaviv_gem.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;
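
/*
 * Shmem and userptr objects get distinct lockdep classes for their object
 * lock: the userptr path may take mmap_lock while holding the object lock
 * (see the might_lock_read() in etnaviv_gem_userptr_get_pages()), a nesting
 * that never occurs for shmem objects. Separate classes keep lockdep from
 * treating the two usage patterns as one contradictory locking scheme.
 */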

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
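
/*
 * Mapping with DMA_BIDIRECTIONAL at attach time is what guarantees freshly
 * allocated pages are clean for a non-coherent device: on architectures
 * without hardware coherency the map operation writes back and invalidates
 * the CPU cache lines covering the buffer. The cache mode itself comes from
 * the BO allocation flags (ETNA_BO_CACHED, ETNA_BO_WC or ETNA_BO_UNCACHED,
 * defined in the etnaviv UAPI header).
 */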

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}
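
/*
 * Both the page array and the scatterlist are populated lazily on first use
 * and then stay around for the lifetime of the object; nothing is torn down
 * again until the type-specific release hook runs. Callers must hold
 * etnaviv_obj->lock so that concurrent first users do not race the
 * allocation.
 */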

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);

	/* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}
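
/*
 * Note the reference juggling in the cached case above: fput() drops the
 * VMA's reference on the DRM device file it was originally mapped through,
 * and get_file() takes one on the object's shmem file before installing it
 * as vma->vm_file, keeping both reference counts balanced across the swap.
 */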

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}
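
/*
 * Returning VM_FAULT_NOPAGE when the lock acquisition is interrupted by a
 * signal is deliberate: the fault handler returns to userspace without
 * installing a PTE, so the faulting access simply retries (or the pending
 * signal gets delivered) instead of the access being failed outright.
 */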

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}
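
/*
 * Dropping the last use does not tear the mapping down: it stays cached on
 * the object's vram_list so a later etnaviv_gem_mapping_get() for the same
 * context can reuse it, and the MMU is free to reap it under address space
 * pressure. The actual unmap and kfree() happen in etnaviv_gem_free_object().
 */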

struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->context = etnaviv_iommu_context_get(mmu_context);
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}
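
/*
 * The expected call pattern is get/use/unreference, e.g. when building a
 * submit:
 *
 *	mapping = etnaviv_gem_mapping_get(obj, mmu_context, va);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	... emit mapping->iova into the command stream ...
 *	etnaviv_gem_mapping_unreference(mapping);
 *
 * Each successful get holds both a mapping use count and a GEM object
 * reference; unreference drops both.
 */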

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}
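
/*
 * The direction follows the CPU's point of view: a CPU read of GPU-written
 * data needs a DMA_FROM_DEVICE sync (invalidate stale cache lines before
 * reading), while a CPU write needs DMA_TO_DEVICE (clean dirty lines back
 * out for the GPU). An op with neither bit set is synced both ways.
 */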

int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled_rcu(obj->resv, write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout_rcu(obj->resv, write, true,
						remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}
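
/*
 * These two functions back the CPU_PREP and CPU_FINI ioctls: userspace is
 * expected to bracket every CPU access to a cached BO with a prep/fini
 * pair, roughly:
 *
 *	prep(bo, ETNA_PREP_WRITE);	// wait for the GPU, sync for CPU
 *	memcpy(map, data, size);	// CPU owns the buffer
 *	fini(bo);			// hand ownership back to the GPU
 *
 * For WC and uncached BOs only the fence wait in prep matters; the cache
 * maintenance is skipped.
 */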

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
		   etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
		   obj->name, kref_read(&obj->refcount),
		   off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, " ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};
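
/*
 * etnaviv_gem_ops is the per-type vtable behind etnaviv_gem_get_pages(),
 * etnaviv_gem_vmap() and etnaviv_gem_mmap(): shmem-backed objects use the
 * hooks above, while userptr objects (below) and imported dma-bufs (see
 * etnaviv_gem_prime.c) supply their own get_pages/release/mmap
 * implementations over the same object lifecycle.
 */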

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}
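
/*
 * Teardown order matters here: the cached MMU mappings must be unmapped
 * before the type-specific release hook frees (or unpins) the backing
 * pages, and only then can the core GEM state and the object itself go
 * away. A context-less mapping on the list is one the MMU already reaped,
 * so only the list entry is left to free.
 */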

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}
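
/*
 * The reference handed out by the allocation is dropped unconditionally at
 * the fail label: on success the handle created above keeps the object
 * alive, on error the put is what frees the half-constructed object. A
 * failed etnaviv_gem_new_impl() leaves obj NULL, which drm_gem_object_put()
 * accepts.
 */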

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}
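
/*
 * Private objects get no shmem backing file (drm_gem_private_object_init()
 * versus drm_gem_object_init() above); they are used where the pages come
 * from somewhere else entirely, such as the userptr objects below or
 * dma-buf imports.
 */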

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages,
					  FOLL_WRITE | FOLL_FORCE, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}
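
/*
 * pin_user_pages_fast() may pin fewer pages than requested (it returns the
 * number actually pinned), hence the loop that advances through the user
 * range until all npages are pinned; any short result simply triggers
 * another iteration starting at the first unpinned page.
 */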

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};
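
/*
 * mmap of a userptr object is rejected on purpose: the pages already live
 * in the owning process's address space at userptr->ptr, so a second GEM
 * mapping would be redundant at best and confusing for the address_space
 * bookkeeping at worst.
 */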

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);

	return ret;
}