vmwgfx_context.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

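/**
 * struct vmw_user_context - Representation of a user-space 3D context.
 * @base: TTM base object backing the user-space handle.
 * @res: The hardware context resource.
 * @cbs: Tracked binding state for the context.
 * @man: Command buffer managed resource manager (guest-backed devices only).
 * @cotables: Per-type cotable resources (DX contexts only).
 * @cotable_lock: Protects @cotables against concurrent teardown.
 * @dx_query_mob: Buffer object holding DX query state bound to this context.
 */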
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
	spinlock_t cotable_lock;
	struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/**
 * Context management:
 */
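/**
 * vmw_context_cotables_unref - Drop the context's references on its cotables.
 * @dev_priv: Pointer to the device private structure.
 * @uctx: Pointer to the user context owning the cotables.
 *
 * Each cotable pointer is cleared under @uctx->cotable_lock before the
 * reference is dropped, so a concurrent scrub cannot pick up a stale pointer.
 */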
static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
				       struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;
	u32 cotable_max = has_sm5_context(dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	for (i = 0; i < cotable_max; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}
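/**
 * vmw_hw_context_destroy - Destroy the device context.
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts, tears down the command buffer resource
 * manager and all bindings and calls the type-specific destroy function.
 * For legacy contexts, queues a SVGA_3D_CMD_CONTEXT_DESTROY command instead.
 */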
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(dev_priv, uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}
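/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize a DX context rather than a plain GB context.
 * @res: Pointer to the embedded context resource to initialize.
 * @res_free: Destructor used to free @res on failure, or NULL for kfree().
 *
 * Sets up the backup size, command buffer resource manager, binding state
 * and, for DX contexts, the cotable resources.
 */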
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    SVGA3D_CONTEXT_DATA_SIZE);
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		u32 cotable_max = has_sm5_context(dev_priv) ?
			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
		for (i = 0; i < cotable_max; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (IS_ERR(uctx->cotables[i])) {
				ret = PTR_ERR(uctx->cotables[i]);
				goto out_cotables;
			}
		}
	}

	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_cotables:
	vmw_context_cotables_unref(dev_priv, uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}
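/**
 * vmw_context_init - Initialize a context resource.
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the embedded context resource to initialize.
 * @res_free: Destructor used to free @res on failure, or NULL for kfree().
 * @dx: Whether to create a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices; otherwise a
 * legacy context is defined directly on the FIFO.
 */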
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

/*
 * GB context.
 */
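/**
 * vmw_gb_context_create - Allocate a device id and define the GB context.
 * @res: Pointer to the context resource.
 *
 * Idempotent: returns 0 immediately if the resource already has an id.
 */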
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
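/**
 * vmw_gb_context_bind - Bind the context's backup MOB to the device.
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */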
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
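/**
 * vmw_gb_context_unbind - Unbind the context from its backup MOB.
 * @res: Pointer to the context resource.
 * @readback: Whether to issue a readback command before unbinding.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 *
 * Scrubs all bindings, optionally reads the context state back into the
 * backup buffer, binds the context to SVGA3D_INVALID_ID and fences the
 * backup buffer.
 */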
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}

	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
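/**
 * vmw_gb_context_destroy - Destroy the GB context on the device.
 * @res: Pointer to the context resource.
 *
 * Also invalidates the cached query context id and releases the resource id.
 */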
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */
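/**
 * vmw_dx_context_create - Allocate a device id and define the DX context.
 * @res: Pointer to the context resource.
 *
 * Idempotent: returns 0 immediately if the resource already has an id.
 */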
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
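/**
 * vmw_dx_context_bind - Bind the DX context's backup MOB to the device.
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */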
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * Cotables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < cotable_max; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);

		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}
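/**
 * vmw_dx_context_unbind - Unbind the DX context from its backup MOB.
 * @res: Pointer to the context resource.
 * @readback: Whether to read back context, cotable and query state.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 *
 * Scrubs bindings and cotables, optionally reads back pending query
 * results, binds the context to SVGA3D_INVALID_ID and fences the backup
 * buffer.
 */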
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}

	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
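/**
 * vmw_dx_context_destroy - Destroy the DX context on the device.
 * @res: Pointer to the context resource.
 *
 * Also invalidates the cached query context id and releases the resource id.
 */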
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}
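/**
 * vmw_user_context_free - Resource destructor for user contexts.
 * @res: Pointer to the embedded context resource.
 *
 * Frees the binding state, drops any DX query MOB association, frees the
 * user context and returns its accounted size to the TTM memory global.
 */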
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
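/**
 * vmw_context_define - Implementation of the context define ioctls.
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg to fill in with the handle.
 * @file_priv: Pointer to the calling file.
 * @dx: Whether to create a DX context.
 *
 * Accounts the user context against the TTM memory global, allocates and
 * initializes it, and exposes it to user space as a TTM base object whose
 * handle is returned in @data.
 */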
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_operation_ctx ttm_opt_ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;

	if (!has_sm4_context(dev_priv) && dx) {
		VMW_DEBUG_USER("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
		  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   &ttm_opt_ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.handle;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}

	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}
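/**
 * vmw_context_res_man - Return the context's command buffer resource manager.
 * @ctx: The context resource.
 */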
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}
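/**
 * vmw_context_cotable - Return the cotable resource of a given type.
 * @ctx: The context resource.
 * @cotable_type: The SVGACOTableType of the cotable to look up.
 *
 * Returns ERR_PTR(-EINVAL) if @cotable_type is out of range for the device.
 */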
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	if (cotable_type >= cotable_max)
		return ERR_PTR(-EINVAL);

	return container_of(ctx, struct vmw_user_context, res)->
		cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_buffer_object *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_bo_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_bo_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}