// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"

/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};

/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}

/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}

/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->mem.start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_bo_unpin - Unpin the given buffer. Does not move the buffer.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

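/*
 * Example: the pin helpers above pair with vmw_bo_unpin(). A minimal sketch
 * of a hypothetical caller that temporarily pins a buffer into VRAM; the
 * function name is an assumption for illustration only, and the device would
 * typically be programmed to use the buffer between the two calls:
 *
 *	int example_pin_for_device_access(struct vmw_private *dev_priv,
 *					  struct vmw_buffer_object *buf)
 *	{
 *		int ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *
 *		if (ret)
 *			return ret;
 *
 *		return vmw_bo_unpin(dev_priv, buf, true);
 *	}
 */
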
/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->mem.start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}

/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->mem.mem_type;
	pl.flags = bo->mem.placement;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;
	else
		pl.flags &= ~TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}

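/*
 * Example: vmw_bo_pin_reserved() must be called with the buffer reserved.
 * A minimal sketch of the expected pattern; the vbo pointer is assumed to
 * exist in the caller, and the later unpin must again be done under
 * reservation:
 *
 *	ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	vmw_bo_pin_reserved(vbo, true);
 *	ttm_bo_unreserve(&vbo->base);
 */
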
/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}

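/*
 * Example: typical use of the cached kernel map. The returned address stays
 * valid while the buffer is pinned or reserved. A minimal sketch; vbo, data
 * and size are assumed to exist in the caller:
 *
 *	ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
 *	if (!ret) {
 *		void *virtual = vmw_bo_map_and_cache(vbo);
 *
 *		if (virtual)
 *			memcpy(virtual, data, size);
 *		ttm_bo_unreserve(&vbo->base);
 *	}
 *
 * The map is intentionally left cached here; it is torn down automatically
 * on move, swapout or destruction, or explicitly with vmw_bo_unmap().
 */
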
/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}

/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 * Return: The amount of bookkeeping memory (TTM structures and the page
 * array) accounted against a buffer of the requested size.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}

/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	kfree(vmw_bo);
}

/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible, acc_size,
			  NULL, NULL, bo_free);
	return ret;
}

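/*
 * Example: allocating a kernel-internal buffer object with vmw_bo_init().
 * A minimal sketch; the PAGE_SIZE request is an arbitrary assumption, and
 * vmw_bo_bo_free() is passed as the destructor so the object is freed on
 * error and on the final reference drop:
 *
 *	struct vmw_buffer_object *vbo;
 *	int ret;
 *
 *	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *	if (!vbo)
 *		return -ENOMEM;
 *
 *	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
 *			  &vmw_vram_sys_placement, true,
 *			  &vmw_bo_bo_free);
 *	if (ret)
 *		return ret;
 */
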
/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}

/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		atomic_dec(&user_bo->vbo.cpu_writers);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}

/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * assigned, or NULL if no such pointer is needed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 * Return: Zero if the caller may access the buffer, -EPERM otherwise.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout_rcu
			(bo->base.resv, true, true,
			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&user_bo->vbo.cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		atomic_dec(&user_bo->vbo.cpu_writers);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

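/*
 * Example: the user-space flow this ioctl implements, as a minimal sketch.
 * The drm_fd and bo_handle variables and the DRM_IOCTL_VMW_SYNCCPU request
 * macro are assumptions for illustration; the real request number is
 * composed from the vmwgfx ioctl table and the uapi header. CPU access to
 * the buffer contents happens between the two calls:
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.handle = bo_handle,
 *		.op = drm_vmw_synccpu_grab,
 *		.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_VMW_SYNCCPU, &arg);
 *	arg.op = drm_vmw_synccpu_release;
 *	ioctl(drm_fd, DRM_IOCTL_VMW_SYNCCPU, &arg);
 */
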
/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, Negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}

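/*
 * Example: looking up a buffer object from a user handle. Both output
 * pointers are refcounted and must be released by the caller, as the synccpu
 * ioctl above does. A minimal sketch; tfile and handle are assumed to exist
 * in the caller:
 *
 *	struct vmw_buffer_object *vbo;
 *	struct ttm_base_object *base;
 *	int ret;
 *
 *	ret = vmw_user_bo_lookup(tfile, handle, &vbo, &base);
 *	if (ret)
 *		return ret;
 *
 *	vmw_bo_unreference(&vbo);
 *	ttm_base_object_unref(&base);
 */
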
/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_buffer_object and returns a
 * pointer to the struct vmw_buffer_object it derives from without
 * refcounting the pointer. The returned pointer is only valid until
 * vmw_user_bo_noref_release() is called, and the object pointed to by the
 * returned pointer may be doomed. Any persistent usage of the object
 * requires a refcount to be taken using ttm_bo_reference_unless_doomed().
 * If this function returns successfully it needs to be paired with
 * vmw_user_bo_noref_release(), and no sleeping or scheduling functions may
 * be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}

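/*
 * Example: the pairing rule described above, as a minimal sketch. No
 * sleeping or scheduling calls may be made between the lookup and the
 * release; tfile and handle are assumed to exist in the caller:
 *
 *	struct vmw_buffer_object *vbo;
 *
 *	vbo = vmw_user_bo_noref_lookup(tfile, handle);
 *	if (IS_ERR(vbo))
 *		return PTR_ERR(vbo);
 *
 *	vmw_user_bo_noref_release();
 */
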
/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, Negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}

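/*
 * Example: how user space typically drives the dumb-buffer callbacks in this
 * file, using the generic DRM dumb-buffer ioctls. A minimal sketch with
 * error handling omitted; drm_fd is assumed to be an open device fd and the
 * mode parameters are arbitrary:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, drm_fd, map.offset);
 */
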
/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}

/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_buffer_object *vbo;

	if (mem == NULL)
		return;

	/* Make sure @bo is embedded in a struct vmw_buffer_object */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}