vmwgfx_ttm_buffer.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
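
/*
 * Static TTM placement descriptors used throughout this file. Each
 * struct ttm_place names a memory type (system, VRAM, or the
 * vmwgfx-specific GMR/MOB types) together with caching and eviction
 * flags; the struct ttm_placement tables below combine them into
 * preferred and "busy" (fallback) placement lists.
 */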
static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
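
/*
 * struct vmw_ttm_tt - vmwgfx per-TT backend state.
 *
 * Wraps the core struct ttm_dma_tt and keeps the scatter-gather table,
 * the driver-private struct vmw_sg_table view of it, the GMR id or MOB
 * used when bound, and the mapped/bound bookkeeping flags.
 */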
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
	bool bound;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}

/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mapping mode
 * and make sure the TTM pages are visible to the device. Allocate storage
 * for the device mappings. If a mapping has already been performed,
 * indicated by the storage pointer being non NULL, the function returns
 * success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;
	struct scatterlist *sg;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
				vsgt->num_pages, 0,
				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
				dma_get_max_seg_size(dev_priv->dev->dev),
				NULL, 0, GFP_KERNEL);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto out_sg_alloc_fail;
		}

		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.orig_nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
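
/*
 * vmw_ttm_bind - Bind a TTM to its backing GMR or MOB.
 *
 * Maps the pages for DMA if needed and then binds them either as a GMR
 * (guest memory region) or through a MOB, depending on the memory type
 * of @bo_mem. Binding is skipped if the TTM is already bound.
 */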
static int vmw_ttm_bind(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}
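
/*
 * vmw_ttm_unbind - Release the GMR or MOB binding set up by vmw_ttm_bind.
 *
 * Also tears down the DMA mappings when the driver runs in the
 * vmw_dma_map_bind mode, where mappings only live while the TTM is bound.
 */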
static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}
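
/*
 * vmw_ttm_destroy - TTM backend destructor.
 *
 * Unbinds, unmaps and finalizes the TTM (using the coherent-DMA variant
 * when applicable), destroys any MOB still attached, and frees the
 * struct vmw_ttm_tt itself.
 */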
static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);
	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}
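
/*
 * vmw_ttm_populate - Allocate backing pages for a TTM.
 *
 * In vmw_dma_alloc_coherent mode the pages come from the coherent DMA
 * pool and the dma_addr_t array is accounted against the TTM memory
 * global; otherwise the regular TTM page pool is used.
 */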
static int vmw_ttm_populate(struct ttm_bo_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
				       ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}
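
/*
 * vmw_ttm_unpopulate - Free the backing pages of a TTM.
 *
 * Destroys any MOB attached to the TTM, tears down DMA mappings and then
 * returns the pages to the pool they were allocated from, releasing the
 * corresponding memory-global accounting in the coherent-DMA case.
 */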
static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}
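
/*
 * vmw_ttm_tt_create - Allocate and initialize a struct vmw_ttm_tt.
 *
 * Chooses between ttm_dma_tt_init() and ttm_tt_init() depending on
 * whether the device runs in coherent-DMA mode. Returns NULL on failure.
 */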
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}
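
/*
 * vmw_ttm_io_mem_reserve - Set up I/O memory info for a memory region.
 *
 * Only VRAM is I/O mapped; its bus offset is computed from the resource
 * start and the device VRAM base address. System, GMR and MOB placements
 * need no I/O mapping.
 */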
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}

/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}
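
/*
 * vmw_bo_driver - Buffer object driver callbacks that vmwgfx registers
 * with TTM.
 */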
struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_bind = &vmw_ttm_bind,
	.ttm_tt_unbind = &vmw_ttm_unbind,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
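
/*
 * vmw_bo_create_and_populate - Create a non-evictable system-memory buffer
 * object, populate its pages and set up its DMA mappings.
 *
 * On success the populated and mapped buffer object is returned in @bo_p.
 */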
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;

	return ret;
}