rockchip_drm_gem.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

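/*
 * rockchip_gem_iommu_map - map the object's backing pages into the IOMMU
 *
 * Reserves a block of I/O virtual address space from the driver's drm_mm
 * allocator under mm_lock, then maps the object's sg table at that address.
 * On success, rk_obj->dma_addr and rk_obj->size describe the mapping.
 */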
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
				prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

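/*
 * rockchip_gem_iommu_unmap - tear down the mapping created by
 * rockchip_gem_iommu_map() and return the I/O virtual address range to the
 * drm_mm allocator.
 */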
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

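/*
 * rockchip_gem_get_pages - back the object with shmem pages
 *
 * Allocates pages through drm_gem_get_pages(), builds an sg table for them,
 * and flushes the pages out of the CPU caches so the device sees up-to-date
 * contents.
 */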
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sgtable_sg(rk_obj->sgt, s, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

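/*
 * rockchip_gem_alloc_iommu - allocate a buffer behind the IOMMU
 *
 * Backs the object with shmem pages and maps them into the IOMMU domain.
 * If alloc_kmap is set, the pages are also vmap()ed write-combined so the
 * kernel has a linear view of the buffer.
 */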
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

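/*
 * rockchip_gem_alloc_dma - allocate a physically contiguous buffer
 *
 * Used when no IOMMU domain is available. Allocates write-combined memory
 * via the DMA API; DMA_ATTR_NO_KERNEL_MAPPING skips the kernel mapping when
 * the caller does not need one.
 */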
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
		return -ENOMEM;
	}

	return 0;
}

static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

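/*
 * rockchip_drm_gem_object_mmap_iommu - mmap helper for IOMMU-backed objects
 *
 * Inserts the object's individual pages into the userspace VMA;
 * vm_map_pages() maps up to the buffer size and rejects larger VMAs.
 */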
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

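/*
 * rockchip_drm_gem_object_mmap - common tail of the two mmap entry points
 *
 * Dispatches to the IOMMU or DMA variant depending on how the buffer was
 * allocated. On failure, drm_gem_vm_close() drops the object reference
 * taken by drm_gem_mmap_obj()/drm_gem_mmap().
 */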
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

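/*
 * rockchip_gem_mmap_buf - mmap a buffer when the caller already holds the
 * GEM object (e.g. the PRIME mmap path), as opposed to rockchip_gem_mmap(),
 * which resolves the object from the fake file offset.
 */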
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

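/*
 * rockchip_gem_alloc_object - allocate and initialize the GEM wrapper
 *
 * Rounds the requested size up to a page multiple and initializes the
 * embedded drm_gem_object; no backing storage is allocated yet.
 */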
static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
					  DMA_BIDIRECTIONAL, 0);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the returned handle is the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages,
					     rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

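/*
 * The two helpers below attach an imported PRIME sg table to a freshly
 * created GEM object: through the IOMMU when a domain is available,
 * otherwise through the DMA API, which additionally requires the buffer
 * to be contiguous in DMA address space.
 */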
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);

	if (err)
		return err;

	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

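/*
 * rockchip_gem_prime_import_sg_table - wrap an imported PRIME sg table in a
 * rockchip_gem_object and map it for the device, using the IOMMU or DMA
 * path as appropriate.
 */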
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

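/*
 * rockchip_gem_prime_vmap - map the buffer into the kernel's address space.
 * IOMMU-backed objects are vmap()ed write-combined on demand; DMA-backed
 * objects reuse the kernel address obtained at allocation time, unless the
 * allocation skipped the kernel mapping entirely.
 */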
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}