vs_gem.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 VeriSilicon Holdings Co., Ltd.
 */

#include <linux/dma-buf.h>

#include "vs_drv.h"
#include "vs_gem.h"

static const struct drm_gem_object_funcs vs_gem_default_funcs;

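/*
 * Release the first @nr_page entries of a non-contiguous page array, one
 * page at a time with __free_page().
 */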
static void nonseq_free(struct page **pages, unsigned int nr_page)
{
	u32 i;

	if (!pages)
		return;

	for (i = 0; i < nr_page; i++)
		__free_page(pages[i]);
}

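/*
 * Populate vs_obj->pages with @nr_page non-contiguous pages. Allocation is
 * attempted in the largest power-of-two chunk that still fits; whenever a
 * higher-order allocation fails or would overshoot, the chunk size is halved
 * and retried, down to single pages. Every allocated page is marked reserved.
 */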
static int __maybe_unused get_pages(unsigned int nr_page, struct vs_gem_object *vs_obj)
{
	struct page *pages;
	u32 i, num_page, page_count = 0;
	int order = 0;
	gfp_t gfp = GFP_KERNEL;

	if (!vs_obj->pages)
		return -EINVAL;

	gfp &= ~__GFP_HIGHMEM;
	gfp |= __GFP_DMA32;

	num_page = nr_page;

	do {
		pages = NULL;
		order = get_order(num_page * PAGE_SIZE);
		num_page = 1 << order;

		if ((num_page + page_count > nr_page) || (order >= MAX_ORDER)) {
			num_page = num_page >> 1;
			continue;
		}

		pages = alloc_pages(gfp, order);
		if (!pages) {
			if (num_page == 1) {
				nonseq_free(vs_obj->pages, page_count);
				return -ENOMEM;
			}
			num_page = num_page >> 1;
		} else {
			for (i = 0; i < num_page; i++) {
				vs_obj->pages[page_count + i] = &pages[i];
				SetPageReserved(vs_obj->pages[page_count + i]);
			}

			page_count += num_page;
			num_page = nr_page - page_count;
		}
	} while (page_count < nr_page);

	vs_obj->get_pages = true;

	return 0;
}

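/*
 * Undo get_pages(): clear the reserved flag on each page and free the pages
 * themselves.
 */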
static void put_pages(unsigned int nr_page, struct vs_gem_object *vs_obj)
{
	u32 i;

	for (i = 0; i < nr_page; i++)
		ClearPageReserved(vs_obj->pages[i]);

	nonseq_free(vs_obj->pages, nr_page);
}

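/*
 * Allocate backing storage for a GEM object. The buffer is first requested
 * through the DMA API (write-combined, no kernel mapping, and contiguous when
 * no IOMMU is present). With CONFIG_VERISILICON_MMU, a failed DMA allocation
 * falls back to non-contiguous pages from get_pages(), and the result is then
 * mapped through the display-controller MMU to obtain vs_obj->iova; without
 * it, the iova is simply the DMA address.
 */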
static int vs_gem_alloc_buf(struct vs_gem_object *vs_obj)
{
	struct drm_device *dev = vs_obj->base.dev;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;
#ifdef CONFIG_VERISILICON_MMU
	struct vs_drm_private *priv = dev->dev_private;
#endif

	if (vs_obj->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "already allocated.\n");
		return 0;
	}

	vs_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE
			  | DMA_ATTR_NO_KERNEL_MAPPING;

	if (!is_iommu_enabled(dev))
		vs_obj->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	nr_pages = vs_obj->size >> PAGE_SHIFT;

	vs_obj->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
				       GFP_KERNEL | __GFP_ZERO);
	if (!vs_obj->pages) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate pages.\n");
		return -ENOMEM;
	}

	vs_obj->cookie = dma_alloc_attrs(to_dma_dev(dev), vs_obj->size,
					 &vs_obj->dma_addr, GFP_KERNEL,
					 vs_obj->dma_attrs);
	if (!vs_obj->cookie) {
#ifdef CONFIG_VERISILICON_MMU
		ret = get_pages(nr_pages, vs_obj);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate buffer.\n");
			goto err_free;
		}
#else
		DRM_DEV_ERROR(dev->dev, "failed to allocate buffer.\n");
		goto err_free;
#endif
	}

#ifdef CONFIG_VERISILICON_MMU
	/* MMU map */
	if (!priv->mmu) {
		DRM_DEV_ERROR(dev->dev, "invalid mmu.\n");
		ret = -EINVAL;
		goto err_mem_free;
	}

	/* MMU mapping for the REE driver */
	if (!vs_obj->get_pages)
		ret = dc_mmu_map_memory(priv->mmu, (u64)vs_obj->dma_addr,
					nr_pages, &vs_obj->iova, true, false);
	else
		ret = dc_mmu_map_memory(priv->mmu, (u64)vs_obj->pages,
					nr_pages, &vs_obj->iova, false, false);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to do mmu map.\n");
		goto err_mem_free;
	}
#else
	vs_obj->iova = vs_obj->dma_addr;
#endif

	if (!vs_obj->get_pages) {
		ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt,
					    vs_obj->cookie, vs_obj->dma_addr,
					    vs_obj->size, vs_obj->dma_attrs);
		if (ret < 0) {
			DRM_DEV_ERROR(dev->dev, "failed to get sgtable.\n");
			goto err_mem_free;
		}

		if (drm_prime_sg_to_page_addr_arrays(&sgt, vs_obj->pages,
						     NULL, nr_pages)) {
			DRM_DEV_ERROR(dev->dev, "invalid sgtable.\n");
			ret = -EINVAL;
			goto err_sgt_free;
		}

		sg_free_table(&sgt);
	}

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_mem_free:
	if (!vs_obj->get_pages)
		dma_free_attrs(to_dma_dev(dev), vs_obj->size, vs_obj->cookie,
			       vs_obj->dma_addr, vs_obj->dma_attrs);
	else
		put_pages(nr_pages, vs_obj);
err_free:
	kvfree(vs_obj->pages);
	return ret;
}

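/*
 * Release the backing storage of a GEM object: tear down the
 * display-controller MMU mapping when CONFIG_VERISILICON_MMU is enabled, then
 * return the memory either through dma_free_attrs() or, for the
 * non-contiguous fallback, through put_pages().
 */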
static void vs_gem_free_buf(struct vs_gem_object *vs_obj)
{
	struct drm_device *dev = vs_obj->base.dev;
#ifdef CONFIG_VERISILICON_MMU
	struct vs_drm_private *priv = dev->dev_private;
	unsigned int nr_pages;
#endif

	if ((!vs_obj->get_pages) && (!vs_obj->dma_addr)) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

#ifdef CONFIG_VERISILICON_MMU
	if (!priv->mmu) {
		DRM_DEV_ERROR(dev->dev, "invalid mmu.\n");
		return;
	}

	nr_pages = vs_obj->size >> PAGE_SHIFT;
	dc_mmu_unmap_memory(priv->mmu, vs_obj->iova, nr_pages);
#endif

	if (!vs_obj->get_pages)
		dma_free_attrs(to_dma_dev(dev), vs_obj->size, vs_obj->cookie,
			       (dma_addr_t)vs_obj->dma_addr,
			       vs_obj->dma_attrs);
	else
		put_pages(vs_obj->size >> PAGE_SHIFT, vs_obj);

	kvfree(vs_obj->pages);
}

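/*
 * .free callback: imported objects only drop the PRIME bookkeeping, while
 * locally allocated objects release their backing storage as well.
 */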
static void vs_gem_free_object(struct drm_gem_object *obj)
{
	struct vs_gem_object *vs_obj = to_vs_gem_object(obj);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, vs_obj->sgt);
		kvfree(vs_obj->pages);
	} else {
		vs_gem_free_buf(vs_obj);
	}

	drm_gem_object_release(obj);

	kfree(vs_obj);
}

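/*
 * Allocate and initialise a vs_gem_object without backing storage, hook up
 * the default object callbacks and create its mmap offset.
 */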
static struct vs_gem_object *vs_gem_alloc_object(struct drm_device *dev,
						 size_t size)
{
	struct vs_gem_object *vs_obj;
	struct drm_gem_object *obj;
	int ret;

	vs_obj = kzalloc(sizeof(*vs_obj), GFP_KERNEL);
	if (!vs_obj)
		return ERR_PTR(-ENOMEM);

	vs_obj->size = size;
	obj = &vs_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_free;

	vs_obj->base.funcs = &vs_gem_default_funcs;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		drm_gem_object_release(obj);
		goto err_free;
	}

	return vs_obj;

err_free:
	kfree(vs_obj);
	return ERR_PTR(ret);
}

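/*
 * Allocate a GEM object together with its backing buffer; @size is rounded up
 * to a whole number of pages.
 */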
struct vs_gem_object *vs_gem_create_object(struct drm_device *dev,
					   size_t size)
{
	struct vs_gem_object *vs_obj;
	int ret;

	size = PAGE_ALIGN(size);

	vs_obj = vs_gem_alloc_object(dev, size);
	if (IS_ERR(vs_obj))
		return vs_obj;

	ret = vs_gem_alloc_buf(vs_obj);
	if (ret) {
		drm_gem_object_release(&vs_obj->base);
		kfree(vs_obj);
		return ERR_PTR(ret);
	}

	return vs_obj;
}

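/*
 * Create a GEM object and a userspace handle for it. The local reference is
 * dropped after handle creation, so on success the handle holds the only
 * reference.
 */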
static struct vs_gem_object *vs_gem_create_with_handle(struct drm_device *dev,
							struct drm_file *file,
							size_t size,
							unsigned int *handle)
{
	struct vs_gem_object *vs_obj;
	struct drm_gem_object *obj;
	int ret;

	vs_obj = vs_gem_create_object(dev, size);
	if (IS_ERR(vs_obj))
		return vs_obj;

	obj = &vs_obj->base;

	ret = drm_gem_handle_create(file, obj, handle);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)
	drm_gem_object_put(obj);
#else
	drm_gem_object_put_unlocked(obj);
#endif

	if (ret)
		return ERR_PTR(ret);

	return vs_obj;
}

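/*
 * Map a GEM object into a userspace VMA. DMA-allocated buffers are mapped
 * with dma_mmap_attrs(); the non-contiguous fallback is mapped page by page
 * with remap_pfn_range() using write-combined page protection.
 */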
static int vs_gem_mmap_obj(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct vs_gem_object *vs_obj = to_vs_gem_object(obj);
	struct drm_device *drm_dev = vs_obj->base.dev;
	unsigned long vm_size;
	int ret = 0;

	vm_size = vma->vm_end - vma->vm_start;
	if (vm_size > vs_obj->size)
		return -EINVAL;

	vma->vm_pgoff = 0;

	if (!vs_obj->get_pages) {
		vma->vm_flags &= ~VM_PFNMAP;

		ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, vs_obj->cookie,
				     vs_obj->dma_addr, vs_obj->size,
				     vs_obj->dma_attrs);
	} else {
		u32 i, nr_pages, pfn = 0U;
		unsigned long start;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
				 VM_DONTDUMP;

		start = vma->vm_start;
		vm_size = PAGE_ALIGN(vm_size);
		nr_pages = vm_size >> PAGE_SHIFT;

		for (i = 0; i < nr_pages; i++) {
			pfn = page_to_pfn(vs_obj->pages[i]);

			ret = remap_pfn_range(vma, start, pfn, PAGE_SIZE,
					      vma->vm_page_prot);
			if (ret < 0)
				break;

			start += PAGE_SIZE;
		}
	}

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

struct sg_table *vs_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct vs_gem_object *vs_obj = to_vs_gem_object(obj);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)
	return drm_prime_pages_to_sg(obj->dev, vs_obj->pages,
				     vs_obj->size >> PAGE_SHIFT);
#else
	return drm_prime_pages_to_sg(vs_obj->pages, vs_obj->size >> PAGE_SHIFT);
#endif
}

static void *vs_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct vs_gem_object *vs_obj = to_vs_gem_object(obj);

	return vs_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING ?
	       page_address(vs_obj->cookie) : vs_obj->cookie;
}

static void vs_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

static const struct vm_operations_struct vs_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs vs_gem_default_funcs = {
	.free = vs_gem_free_object,
	.get_sg_table = vs_gem_prime_get_sg_table,
	.vmap = vs_gem_prime_vmap,
	.vunmap = vs_gem_prime_vunmap,
	.vm_ops = &vs_vm_ops,
};

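/*
 * Dumb-buffer creation: the pitch is aligned to priv->pitch_alignment unless
 * bpp is a multiple of 10 (packed 10-bit formats with no bit gaps), the size
 * is rounded up to whole pages, and the object is created with a handle.
 */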
int vs_gem_dumb_create(struct drm_file *file,
		       struct drm_device *dev,
		       struct drm_mode_create_dumb *args)
{
	struct vs_drm_private *priv = dev->dev_private;
	struct vs_gem_object *vs_obj;
	unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (args->bpp % 10)
		args->pitch = ALIGN(pitch, priv->pitch_alignment);
	else
		/* for custom 10-bit formats with no bit gaps */
		args->pitch = pitch;

	args->size = PAGE_ALIGN(args->pitch * args->height);

	vs_obj = vs_gem_create_with_handle(dev, file, args->size,
					   &args->handle);

	return PTR_ERR_OR_ZERO(vs_obj);
}

struct drm_gem_object *vs_gem_prime_import(struct drm_device *dev,
					   struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

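/*
 * Import a PRIME buffer described by @sgt. Without CONFIG_VERISILICON_MMU the
 * scatterlist must be DMA-contiguous and page aligned; with the MMU each
 * segment is mapped individually and the iova of the first segment becomes
 * the object address.
 */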
struct drm_gem_object *
vs_gem_prime_import_sg_table(struct drm_device *dev,
			     struct dma_buf_attachment *attach,
			     struct sg_table *sgt)
{
	struct vs_gem_object *vs_obj;
	int npages;
	int ret;
	struct scatterlist *s;
	u32 i;
	dma_addr_t expected;
	size_t size = attach->dmabuf->size;
#ifdef CONFIG_VERISILICON_MMU
	u32 iova;
	struct vs_drm_private *priv = dev->dev_private;

	if (!priv->mmu) {
		DRM_ERROR("invalid mmu.\n");
		ret = -EINVAL;
		return ERR_PTR(ret);
	}
#endif

	size = PAGE_ALIGN(size);

	vs_obj = vs_gem_alloc_object(dev, size);
	if (IS_ERR(vs_obj))
		return ERR_CAST(vs_obj);

	expected = sg_dma_address(sgt->sgl);
	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected) {
#ifndef CONFIG_VERISILICON_MMU
			DRM_ERROR("sg_table is not contiguous");
			ret = -EINVAL;
			goto err;
#endif
		}

		if (sg_dma_len(s) & (PAGE_SIZE - 1)) {
			ret = -EINVAL;
			goto err;
		}

#ifdef CONFIG_VERISILICON_MMU
		iova = 0;
		npages = sg_dma_len(s) >> PAGE_SHIFT;

		ret = dc_mmu_map_memory(priv->mmu, (u64)sg_dma_address(s),
					npages, &iova, true, false);
		if (ret) {
			DRM_ERROR("failed to do mmu map.\n");
			goto err;
		}

		if (i == 0)
			vs_obj->iova = iova;
#else
		if (i == 0)
			vs_obj->iova = sg_dma_address(s);
#endif
		expected = sg_dma_address(s) + sg_dma_len(s);
	}

	vs_obj->dma_addr = sg_dma_address(sgt->sgl);

	npages = vs_obj->size >> PAGE_SHIFT;
	vs_obj->pages = kvmalloc_array(npages, sizeof(struct page *),
				       GFP_KERNEL);
	if (!vs_obj->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, vs_obj->pages, NULL,
					       npages);
	if (ret)
		goto err_free_page;

	vs_obj->sgt = sgt;

	return &vs_obj->base;

err_free_page:
	kvfree(vs_obj->pages);
err:
	vs_gem_free_object(&vs_obj->base);

	return ERR_PTR(ret);
}

int vs_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	int ret = 0;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return vs_gem_mmap_obj(obj, vma);
}

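/*
 * file_operations .mmap entry point: let the DRM core look up the object and
 * set up the VMA, then delegate to dma_buf_mmap() for imported buffers or to
 * vs_gem_mmap_obj() for local ones.
 */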
int vs_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return vs_gem_mmap_obj(obj, vma);
}