vmwgfx_binding.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * This file implements the vmwgfx context binding manager.
 * The sole reason for having to use this code is that vmware guest
 * backed contexts can be swapped out to their backing mobs by the device
 * at any time, and also swapped in at any time. At swapin time, the device
 * validates the context bindings to make sure they point to valid resources.
 * It's this outside-of-drawcall validation (that can happen at any time)
 * that makes this code necessary.
 *
 * We therefore need to kill any context bindings pointing to a resource
 * when the resource is swapped out. Furthermore, if the vmwgfx driver has
 * swapped out the context we can't swap it in again to kill bindings because
 * of backing mob reservation lockdep violations, so as part of
 * context swapout, also kill all bindings of a context, so that they are
 * already killed if a resource to which a binding points
 * needs to be swapped out.
 *
 * Note that a resource can be pointed to by bindings from multiple contexts.
 * Therefore we can't easily protect this data by a per context mutex
 * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
 * to protect all binding manager data.
 *
 * Finally, any association between a context and a global resource
 * (surface, shader or even DX query) is conceptually a context binding that
 * needs to be tracked by this code.
 */
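
/*
 * Illustrative sketch (not part of the driver): how the rest of vmwgfx is
 * expected to drive the binding manager. Callers and error handling are
 * simplified here, and everything below runs under dev_priv->binding_mutex.
 *
 *	// Context swapout: scrub and forget all bindings of the context.
 *	vmw_binding_state_kill(vmw_context_binding_state(ctx));
 *
 *	// Context swapin: re-emit bind commands for still-valid resources.
 *	ret = vmw_binding_rebind_all(vmw_context_binding_state(ctx));
 *
 *	// Resource eviction/destruction: kill the bindings of every context
 *	// that points at this resource.
 *	vmw_binding_res_list_kill(&res->binding_head);
 */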

#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_reg.h"

#define VMW_BINDING_RT_BIT     0
#define VMW_BINDING_PS_BIT     1
#define VMW_BINDING_SO_T_BIT   2
#define VMW_BINDING_VB_BIT     3
#define VMW_BINDING_UAV_BIT    4
#define VMW_BINDING_CS_UAV_BIT 5
#define VMW_BINDING_NUM_BITS   6

#define VMW_BINDING_PS_SR_BIT  0

/**
 * struct vmw_ctx_binding_state - per context binding state
 *
 * @dev_priv: Pointer to device private structure.
 * @list: linked list of individual active bindings.
 * @render_targets: Render target bindings.
 * @texture_units: Texture units bindings.
 * @ds_view: Depth-stencil view binding.
 * @so_targets: StreamOutput target bindings.
 * @vertex_buffers: Vertex buffer bindings.
 * @index_buffer: Index buffer binding.
 * @per_shader: Per shader-type bindings.
 * @ua_views: UAV bindings.
 * @so_state: StreamOutput bindings.
 * @dirty: Bitmap tracking per binding-type changes that have not yet
 * been emitted to the device.
 * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
 * have not yet been emitted to the device.
 * @bind_cmd_buffer: Scratch space used to construct binding commands.
 * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer
 * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
 * device binding slot of the first command data entry in @bind_cmd_buffer.
 *
 * Note that this structure also provides storage space for the individual
 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
 * for individual bindings.
 */
struct vmw_ctx_binding_state {
        struct vmw_private *dev_priv;
        struct list_head list;
        struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
        struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
        struct vmw_ctx_bindinfo_view ds_view;
        struct vmw_ctx_bindinfo_so_target so_targets[SVGA3D_DX_MAX_SOTARGETS];
        struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
        struct vmw_ctx_bindinfo_ib index_buffer;
        struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
        struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
        struct vmw_ctx_bindinfo_so so_state;

        unsigned long dirty;
        DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);

        u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
        u32 bind_cmd_count;
        u32 bind_first_slot;
};

static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
                                           bool rebind);
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
                                       bool rebind);
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);

static void vmw_binding_build_asserts(void) __attribute__ ((unused));

typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

/**
 * struct vmw_binding_info - Per binding type information for the binding
 * manager
 *
 * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
 * @offsets: array[shader_slot] of offsets to the array[slot]
 * of struct bindings for the binding type.
 * @scrub_func: Pointer to the scrub function for this binding type.
 *
 * Holds static information to help optimize the binding manager and avoid
 * an excessive amount of switch statements.
 */
struct vmw_binding_info {
        size_t size;
        const size_t *offsets;
        vmw_scrub_func scrub_func;
};

/*
 * A number of static variables that help determine the scrub func and the
 * location of the struct vmw_ctx_bindinfo slots for each binding type.
 */
static const size_t vmw_binding_shader_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[3].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[4].shader),
        offsetof(struct vmw_ctx_binding_state, per_shader[5].shader),
};
static const size_t vmw_binding_rt_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, render_targets),
};
static const size_t vmw_binding_tex_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, texture_units),
};
static const size_t vmw_binding_cb_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[3].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[4].const_buffers),
        offsetof(struct vmw_ctx_binding_state, per_shader[5].const_buffers),
};
static const size_t vmw_binding_dx_ds_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, ds_view),
};
static const size_t vmw_binding_sr_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[3].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[4].shader_res),
        offsetof(struct vmw_ctx_binding_state, per_shader[5].shader_res),
};
static const size_t vmw_binding_so_target_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, so_targets),
};
static const size_t vmw_binding_vb_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, vertex_buffers),
};
static const size_t vmw_binding_ib_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, index_buffer),
};
static const size_t vmw_binding_uav_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, ua_views[0].views),
};
static const size_t vmw_binding_cs_uav_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
};
static const size_t vmw_binding_so_offsets[] = {
        offsetof(struct vmw_ctx_binding_state, so_state),
};

static const struct vmw_binding_info vmw_binding_infos[] = {
        [vmw_ctx_binding_shader] = {
                .size = sizeof(struct vmw_ctx_bindinfo_shader),
                .offsets = vmw_binding_shader_offsets,
                .scrub_func = vmw_binding_scrub_shader},
        [vmw_ctx_binding_rt] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_rt_offsets,
                .scrub_func = vmw_binding_scrub_render_target},
        [vmw_ctx_binding_tex] = {
                .size = sizeof(struct vmw_ctx_bindinfo_tex),
                .offsets = vmw_binding_tex_offsets,
                .scrub_func = vmw_binding_scrub_texture},
        [vmw_ctx_binding_cb] = {
                .size = sizeof(struct vmw_ctx_bindinfo_cb),
                .offsets = vmw_binding_cb_offsets,
                .scrub_func = vmw_binding_scrub_cb},
        [vmw_ctx_binding_dx_shader] = {
                .size = sizeof(struct vmw_ctx_bindinfo_shader),
                .offsets = vmw_binding_shader_offsets,
                .scrub_func = vmw_binding_scrub_dx_shader},
        [vmw_ctx_binding_dx_rt] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_rt_offsets,
                .scrub_func = vmw_binding_scrub_dx_rt},
        [vmw_ctx_binding_sr] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_sr_offsets,
                .scrub_func = vmw_binding_scrub_sr},
        [vmw_ctx_binding_ds] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_dx_ds_offsets,
                .scrub_func = vmw_binding_scrub_dx_rt},
        [vmw_ctx_binding_so_target] = {
                .size = sizeof(struct vmw_ctx_bindinfo_so_target),
                .offsets = vmw_binding_so_target_offsets,
                .scrub_func = vmw_binding_scrub_so_target},
        [vmw_ctx_binding_vb] = {
                .size = sizeof(struct vmw_ctx_bindinfo_vb),
                .offsets = vmw_binding_vb_offsets,
                .scrub_func = vmw_binding_scrub_vb},
        [vmw_ctx_binding_ib] = {
                .size = sizeof(struct vmw_ctx_bindinfo_ib),
                .offsets = vmw_binding_ib_offsets,
                .scrub_func = vmw_binding_scrub_ib},
        [vmw_ctx_binding_uav] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_uav_offsets,
                .scrub_func = vmw_binding_scrub_uav},
        [vmw_ctx_binding_cs_uav] = {
                .size = sizeof(struct vmw_ctx_bindinfo_view),
                .offsets = vmw_binding_cs_uav_offsets,
                .scrub_func = vmw_binding_scrub_cs_uav},
        [vmw_ctx_binding_so] = {
                .size = sizeof(struct vmw_ctx_bindinfo_so),
                .offsets = vmw_binding_so_offsets,
                .scrub_func = vmw_binding_scrub_so},
};

/**
 * vmw_cbs_context - Return a pointer to the context resource of a
 * context binding state tracker.
 *
 * @cbs: The context binding state tracker.
 *
 * Provided there are any active bindings, this function will return an
 * unreferenced pointer to the context resource that owns the context
 * binding state tracker. If there are no active bindings, this function
 * will return NULL. Note that the caller must somehow ensure that a reference
 * is held on the context resource prior to calling this function.
 */
static const struct vmw_resource *
vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
{
        if (list_empty(&cbs->list))
                return NULL;

        return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
                                ctx_list)->ctx;
}

/**
 * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
 *
 * @cbs: Pointer to a struct vmw_ctx_binding_state which holds the slot.
 * @bt: The binding type.
 * @shader_slot: The shader slot of the binding. If none, then set to 0.
 * @slot: The slot of the binding.
 */
static struct vmw_ctx_bindinfo *
vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
                enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
{
        const struct vmw_binding_info *b = &vmw_binding_infos[bt];
        size_t offset = b->offsets[shader_slot] + b->size*slot;

        return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
}
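
/*
 * Worked example (illustrative): for a constant-buffer binding with
 * bt == vmw_ctx_binding_cb, shader_slot == 1 and slot == 3, the lookup
 * above resolves to
 *
 *	offset = offsetof(struct vmw_ctx_binding_state,
 *			  per_shader[1].const_buffers) +
 *		 sizeof(struct vmw_ctx_bindinfo_cb) * 3;
 *
 * i.e. the address of cbs->per_shader[1].const_buffers[3]. The offset
 * tables above turn slot lookup into data-driven pointer arithmetic
 * instead of a switch on the binding type.
 */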

/**
 * vmw_binding_drop: Stop tracking a context binding
 *
 * @bi: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
{
        list_del(&bi->ctx_list);
        if (!list_empty(&bi->res_list))
                list_del(&bi->res_list);

        bi->ctx = NULL;
}

/**
 * vmw_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 * @shader_slot: The shader slot of the binding.
 * @slot: The slot of the binding.
 *
 * Starts tracking the binding in the context binding
 * state structure @cbs.
 */
void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
                     const struct vmw_ctx_bindinfo *bi,
                     u32 shader_slot, u32 slot)
{
        struct vmw_ctx_bindinfo *loc =
                vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
        const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];

        if (loc->ctx != NULL)
                vmw_binding_drop(loc);

        memcpy(loc, bi, b->size);
        loc->scrubbed = false;
        list_add(&loc->ctx_list, &cbs->list);
        INIT_LIST_HEAD(&loc->res_list);
}
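
/*
 * Illustrative sketch (not part of the driver): how a caller typically
 * stages a binding during command-stream validation. The variable names
 * and field values here are hypothetical; real callers fill them from the
 * validated command stream.
 *
 *	struct vmw_ctx_bindinfo_view binding;
 *
 *	binding.bi.ctx = ctx;			// context resource
 *	binding.bi.res = view_res;		// resource being bound
 *	binding.bi.bt = vmw_ctx_binding_dx_rt;	// binding type
 *	binding.slot = 0;			// render-target slot 0
 *	vmw_binding_add(cbs, &binding.bi, 0, binding.slot);
 */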

/**
 * vmw_binding_add_uav_index - Add UAV index for tracking.
 * @cbs: Pointer to the context binding state tracker.
 * @slot: UAV type to which to bind this index.
 * @index: The splice index to track.
 */
void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
                               uint32 index)
{
        cbs->ua_views[slot].index = index;
}

/**
 * vmw_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @from: Staged binding info built during execbuf.
 * @bi: Information about the binding to track.
 */
static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
                                 const struct vmw_ctx_binding_state *from,
                                 const struct vmw_ctx_bindinfo *bi)
{
        size_t offset = (unsigned long) bi - (unsigned long) from;
        struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
                ((unsigned long) cbs + offset);

        if (loc->ctx != NULL) {
                WARN_ON(bi->scrubbed);

                vmw_binding_drop(loc);
        }

        if (bi->res != NULL) {
                memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
                list_add_tail(&loc->ctx_list, &cbs->list);
                list_add_tail(&loc->res_list, &loc->res->binding_head);
        }
}

/**
 * vmw_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry, *next;

        vmw_binding_state_scrub(cbs);
        list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
                vmw_binding_drop(entry);
}

/**
 * vmw_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry;

        list_for_each_entry(entry, &cbs->list, ctx_list) {
                if (!entry->scrubbed) {
                        (void) vmw_binding_infos[entry->bt].scrub_func
                                (entry, false);
                        entry->scrubbed = true;
                }
        }

        (void) vmw_binding_emit_dirty(cbs);
}

/**
 * vmw_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_binding_res_list_kill(struct list_head *head)
{
        struct vmw_ctx_bindinfo *entry, *next;

        vmw_binding_res_list_scrub(head);
        list_for_each_entry_safe(entry, next, head, res_list)
                vmw_binding_drop(entry);
}

/**
 * vmw_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrub all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_binding_res_list_scrub(struct list_head *head)
{
        struct vmw_ctx_bindinfo *entry;

        list_for_each_entry(entry, head, res_list) {
                if (!entry->scrubbed) {
                        (void) vmw_binding_infos[entry->bt].scrub_func
                                (entry, false);
                        entry->scrubbed = true;
                }
        }

        list_for_each_entry(entry, head, res_list) {
                struct vmw_ctx_binding_state *cbs =
                        vmw_context_binding_state(entry->ctx);

                (void) vmw_binding_emit_dirty(cbs);
        }
}

/**
 * vmw_binding_state_commit - Commit staged binding info
 *
 * @to: Pointer to the persistent binding state to commit the staged
 * binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure
 * (typically used by execbuf) to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware.
 */
void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
                              struct vmw_ctx_binding_state *from)
{
        struct vmw_ctx_bindinfo *entry, *next;

        list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
                vmw_binding_transfer(to, from, entry);
                vmw_binding_drop(entry);
        }

        /* Also transfer uav splice indices */
        to->ua_views[0].index = from->ua_views[0].index;
        to->ua_views[1].index = from->ua_views[1].index;
}
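
/*
 * Illustrative sketch (not part of the driver): the staged/persistent
 * split as used by the execbuf path. Names are simplified; staged_cbs
 * stands for a per-submission staging area.
 *
 *	// Validation: record bindings in the staging area only.
 *	vmw_binding_add(staged_cbs, &bi, shader_slot, slot);
 *
 *	// After the command stream has been committed to the device,
 *	// fold the staged bindings into the context's persistent state.
 *	vmw_binding_state_commit(vmw_context_binding_state(ctx), staged_cbs);
 */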

/**
 * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry;
        int ret;

        list_for_each_entry(entry, &cbs->list, ctx_list) {
                if (likely(!entry->scrubbed))
                        continue;

                if (entry->res == NULL || entry->res->id == SVGA3D_INVALID_ID)
                        continue;

                ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
                if (unlikely(ret != 0))
                        return ret;

                entry->scrubbed = false;
        }

        return vmw_binding_emit_dirty(cbs);
}

/**
 * vmw_binding_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_shader *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetShader body;
        } *cmd;

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_SET_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
        cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_binding_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
                                           bool rebind)
{
        struct vmw_ctx_bindinfo_view *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = bi->ctx->id;
        cmd->body.type = binding->slot;
        cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        cmd->body.target.face = 0;
        cmd->body.target.mipmap = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_binding_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
                                     bool rebind)
{
        struct vmw_ctx_bindinfo_tex *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                struct {
                        SVGA3dCmdSetTextureState c;
                        SVGA3dTextureState s1;
                } body;
        } *cmd;

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.c.cid = bi->ctx->id;
        cmd->body.s1.stage = binding->texture_stage;
        cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
        cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_shader *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetShader body;
        } *cmd;

        cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
        cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_cb *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetSingleConstantBuffer body;
        } *cmd;

        cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.slot = binding->slot;
        cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
        if (rebind) {
                cmd->body.offsetInBytes = binding->offset;
                cmd->body.sizeInBytes = binding->size;
                cmd->body.sid = bi->res->id;
        } else {
                cmd->body.offsetInBytes = 0;
                cmd->body.sizeInBytes = 0;
                cmd->body.sid = SVGA3D_INVALID_ID;
        }
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_collect_view_ids - Build view id data for a view binding command
 * without checking which bindings actually need to be emitted
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings and builds a buffer of view id data.
 * Stops at the first non-existing binding in the @bi array.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
 * contains the command data.
 */
static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
                                 const struct vmw_ctx_bindinfo *bi,
                                 u32 max_num)
{
        const struct vmw_ctx_bindinfo_view *biv =
                container_of(bi, struct vmw_ctx_bindinfo_view, bi);
        unsigned long i;

        cbs->bind_cmd_count = 0;
        cbs->bind_first_slot = 0;

        for (i = 0; i < max_num; ++i, ++biv) {
                if (!biv->bi.ctx)
                        break;

                cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
                        ((biv->bi.scrubbed) ?
                         SVGA3D_INVALID_ID : biv->bi.res->id);
        }
}

/**
 * vmw_collect_dirty_view_ids - Build view id data for a view binding command
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @dirty: Bitmap indicating which bindings need to be emitted.
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings that need to be emitted and
 * builds a buffer of view id data.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 * binding, and @cbs->bind_cmd_buffer contains the command data.
 */
static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
                                       const struct vmw_ctx_bindinfo *bi,
                                       unsigned long *dirty,
                                       u32 max_num)
{
        const struct vmw_ctx_bindinfo_view *biv =
                container_of(bi, struct vmw_ctx_bindinfo_view, bi);
        unsigned long i, next_bit;

        cbs->bind_cmd_count = 0;
        i = find_first_bit(dirty, max_num);
        next_bit = i;
        cbs->bind_first_slot = i;

        biv += i;
        for (; i < max_num; ++i, ++biv) {
                cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
                        ((!biv->bi.ctx || biv->bi.scrubbed) ?
                         SVGA3D_INVALID_ID : biv->bi.res->id);

                if (next_bit == i) {
                        next_bit = find_next_bit(dirty, max_num, i + 1);
                        if (next_bit >= max_num)
                                break;
                }
        }
}
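
/*
 * Worked example (illustrative): if @dirty has bits 2 and 5 set, the loop
 * above starts at slot 2 and keeps collecting until it has passed the last
 * dirty bit, yielding bind_first_slot == 2 and bind_cmd_buffer entries for
 * slots 2, 3, 4 and 5 (clean slots in between are re-emitted unchanged).
 * The device command can then bind one contiguous range of slots instead
 * of issuing a command per dirty slot.
 */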

/**
 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @shader_slot: The shader slot of the bindings.
 */
static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
                           int shader_slot)
{
        const struct vmw_ctx_bindinfo *loc =
                &cbs->per_shader[shader_slot].shader_res[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetShaderResources body;
        } *cmd;
        size_t cmd_size, view_id_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_dirty_view_ids(cbs, loc,
                                   cbs->per_shader[shader_slot].dirty_sr,
                                   SVGA3D_DX_MAX_SRVIEWS);
        if (cbs->bind_cmd_count == 0)
                return 0;

        view_id_size = cbs->bind_cmd_count*sizeof(uint32);
        cmd_size = sizeof(*cmd) + view_id_size;
        cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
        cmd->header.size = sizeof(cmd->body) + view_id_size;
        cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
        cmd->body.startView = cbs->bind_first_slot;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

        vmw_fifo_commit(ctx->dev_priv, cmd_size);
        bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
                     cbs->bind_first_slot, cbs->bind_cmd_count);

        return 0;
}

/**
 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetRenderTargets body;
        } *cmd;
        size_t cmd_size, view_id_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
        view_id_size = cbs->bind_cmd_count*sizeof(uint32);
        cmd_size = sizeof(*cmd) + view_id_size;
        cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
        cmd->header.size = sizeof(cmd->body) + view_id_size;

        if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
                cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
        else
                cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

        vmw_fifo_commit(ctx->dev_priv, cmd_size);

        return 0;
}

/**
 * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
 * without checking which bindings actually need to be emitted
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
 * Stops at the first non-existing binding in the @bi array.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
 * contains the command data.
 */
static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
                                   const struct vmw_ctx_bindinfo *bi,
                                   u32 max_num)
{
        const struct vmw_ctx_bindinfo_so_target *biso =
                container_of(bi, struct vmw_ctx_bindinfo_so_target, bi);
        unsigned long i;
        SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;

        cbs->bind_cmd_count = 0;
        cbs->bind_first_slot = 0;

        for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
                    ++cbs->bind_cmd_count) {
                if (!biso->bi.ctx)
                        break;

                if (!biso->bi.scrubbed) {
                        so_buffer->sid = biso->bi.res->id;
                        so_buffer->offset = biso->offset;
                        so_buffer->sizeInBytes = biso->size;
                } else {
                        so_buffer->sid = SVGA3D_INVALID_ID;
                        so_buffer->offset = 0;
                        so_buffer->sizeInBytes = 0;
                }
        }
}

/**
 * vmw_emit_set_so_target - Issue delayed streamout binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetSOTargets body;
        } *cmd;
        size_t cmd_size, so_target_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
        if (cbs->bind_cmd_count == 0)
                return 0;

        so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
        cmd_size = sizeof(*cmd) + so_target_size;
        cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
        cmd->header.size = sizeof(cmd->body) + so_target_size;
        memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);

        vmw_fifo_commit(ctx->dev_priv, cmd_size);

        return 0;
}

/**
 * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
        u32 i;
        int ret = 0;

        for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
                if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
                        continue;

                ret = vmw_emit_set_sr(cbs, i);
                if (ret)
                        break;

                __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
        }

        /* Propagate any emit failure to the caller instead of dropping it. */
        return ret;
}

/**
 * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
 * SVGA3dCmdDXSetVertexBuffers command
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @bi: Pointer to where the binding info array is stored in @cbs
 * @dirty: Bitmap indicating which bindings need to be emitted.
 * @max_num: Maximum number of entries in the @bi array.
 *
 * Scans the @bi array for bindings that need to be emitted and
 * builds a buffer of SVGA3dVertexBuffer data.
 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 * binding, and @cbs->bind_cmd_buffer contains the command data.
 */
static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
                                  const struct vmw_ctx_bindinfo *bi,
                                  unsigned long *dirty,
                                  u32 max_num)
{
        const struct vmw_ctx_bindinfo_vb *biv =
                container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
        unsigned long i, next_bit;
        SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;

        cbs->bind_cmd_count = 0;
        i = find_first_bit(dirty, max_num);
        next_bit = i;
        cbs->bind_first_slot = i;

        biv += i;
        for (; i < max_num; ++i, ++biv, ++vbs) {
                if (!biv->bi.ctx || biv->bi.scrubbed) {
                        vbs->sid = SVGA3D_INVALID_ID;
                        vbs->stride = 0;
                        vbs->offset = 0;
                } else {
                        vbs->sid = biv->bi.res->id;
                        vbs->stride = biv->stride;
                        vbs->offset = biv->offset;
                }
                cbs->bind_cmd_count++;
                if (next_bit == i) {
                        next_bit = find_next_bit(dirty, max_num, i + 1);
                        if (next_bit >= max_num)
                                break;
                }
        }
}

/**
 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo *loc =
                &cbs->vertex_buffers[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetVertexBuffers body;
        } *cmd;
        size_t cmd_size, set_vb_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
                              SVGA3D_DX_MAX_VERTEXBUFFERS);
        if (cbs->bind_cmd_count == 0)
                return 0;

        set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
        cmd_size = sizeof(*cmd) + set_vb_size;
        cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
        cmd->header.size = sizeof(cmd->body) + set_vb_size;
        cmd->body.startBuffer = cbs->bind_first_slot;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);

        vmw_fifo_commit(ctx->dev_priv, cmd_size);
        bitmap_clear(cbs->dirty_vb,
                     cbs->bind_first_slot, cbs->bind_cmd_count);

        return 0;
}

static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[0].views[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetUAViews body;
        } *cmd;
        size_t cmd_size, view_id_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
        view_id_size = cbs->bind_cmd_count*sizeof(uint32);
        cmd_size = sizeof(*cmd) + view_id_size;
        cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
        if (!cmd)
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
        cmd->header.size = sizeof(cmd->body) + view_id_size;

        /* Splice index is specified by user-space */
        cmd->body.uavSpliceIndex = cbs->ua_views[0].index;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

        vmw_fifo_commit(ctx->dev_priv, cmd_size);

        return 0;
}

static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
{
        const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[1].views[0].bi;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetCSUAViews body;
        } *cmd;
        size_t cmd_size, view_id_size;
        const struct vmw_resource *ctx = vmw_cbs_context(cbs);

        vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
        view_id_size = cbs->bind_cmd_count*sizeof(uint32);
        cmd_size = sizeof(*cmd) + view_id_size;
        cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
        if (!cmd)
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS;
        cmd->header.size = sizeof(cmd->body) + view_id_size;

        /* Start index is specified by user-space */
        cmd->body.startIndex = cbs->ua_views[1].index;

        memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

        vmw_fifo_commit(ctx->dev_priv, cmd_size);

        return 0;
}

/**
 * vmw_binding_emit_dirty - Issue delayed binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * This function issues the delayed binding commands that arise from
 * previous scrub / unscrub calls. These binding commands are typically
 * commands that batch a number of bindings and therefore it makes sense
 * to delay them.
 */
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
{
        int ret = 0;
        unsigned long hit = 0;

        while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
               < VMW_BINDING_NUM_BITS) {

                switch (hit) {
                case VMW_BINDING_RT_BIT:
                        ret = vmw_emit_set_rt(cbs);
                        break;
                case VMW_BINDING_PS_BIT:
                        ret = vmw_binding_emit_dirty_ps(cbs);
                        break;
                case VMW_BINDING_SO_T_BIT:
                        ret = vmw_emit_set_so_target(cbs);
                        break;
                case VMW_BINDING_VB_BIT:
                        ret = vmw_emit_set_vb(cbs);
                        break;
                case VMW_BINDING_UAV_BIT:
                        ret = vmw_emit_set_uav(cbs);
                        break;
                case VMW_BINDING_CS_UAV_BIT:
                        ret = vmw_emit_set_cs_uav(cbs);
                        break;
                default:
                        BUG();
                }
                if (ret)
                        return ret;

                __clear_bit(hit, &cbs->dirty);
                hit++;
        }

        return 0;
}
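
/*
 * Illustrative sketch (not part of the driver): the scrub functions below
 * for batched binding types only mark state dirty; the device commands are
 * emitted later by vmw_binding_emit_dirty(). For example, scrubbing two
 * vertex buffers (vb0 and vb1 are hypothetical bindings) costs a few bit
 * flips and then a single SVGA3dCmdDXSetVertexBuffers command:
 *
 *	vmw_binding_scrub_vb(&vb0->bi, false);	// sets a dirty_vb bit + VB bit
 *	vmw_binding_scrub_vb(&vb1->bi, false);	// sets another dirty_vb bit
 *	vmw_binding_emit_dirty(cbs);		// one batched command
 */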

/**
 * vmw_binding_scrub_sr - Schedule a dx shader resource binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_view *biv =
                container_of(bi, struct vmw_ctx_bindinfo_view, bi);
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
        __set_bit(VMW_BINDING_PS_SR_BIT,
                  &cbs->per_shader[biv->shader_slot].dirty);
        __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);

        return 0;
}

/**
 * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);

        return 0;
}

/**
 * vmw_binding_scrub_so_target - Schedule a dx streamoutput buffer binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(VMW_BINDING_SO_T_BIT, &cbs->dirty);

        return 0;
}

/**
 * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
 * scrub from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_vb *bivb =
                container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
        struct vmw_ctx_binding_state *cbs =
                vmw_context_binding_state(bi->ctx);

        __set_bit(bivb->slot, cbs->dirty_vb);
        __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);

        return 0;
}

/**
 * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_ib *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetIndexBuffer body;
        } *cmd;

        cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
        cmd->header.size = sizeof(cmd->body);
        if (rebind) {
                cmd->body.sid = bi->res->id;
                cmd->body.format = binding->format;
                cmd->body.offset = binding->offset;
        } else {
                cmd->body.sid = SVGA3D_INVALID_ID;
                cmd->body.format = 0;
                cmd->body.offset = 0;
        }
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);

        __set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty);

        return 0;
}

static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);

        __set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty);

        return 0;
}

/**
 * vmw_binding_scrub_so - Scrub a streamoutput binding from a context.
 * @bi: Single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
{
        struct vmw_ctx_bindinfo_so *binding =
                container_of(bi, typeof(*binding), bi);
        struct vmw_private *dev_priv = bi->ctx->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXSetStreamOutput body;
        } *cmd;

        cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
        if (!cmd)
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
 * memory accounting.
 *
 * @dev_priv: Pointer to a device private structure.
 *
 * Returns a pointer to a newly allocated struct or an error pointer on error.
 */
struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv)
{
        struct vmw_ctx_binding_state *cbs;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        int ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
                                   &ctx);
        if (ret)
                return ERR_PTR(ret);

        cbs = vzalloc(sizeof(*cbs));
        if (!cbs) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
                return ERR_PTR(-ENOMEM);
        }

        cbs->dev_priv = dev_priv;
        INIT_LIST_HEAD(&cbs->list);

        return cbs;
}
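
/*
 * Illustrative sketch (not part of the driver): allocation follows the
 * usual kernel ERR_PTR convention, so callers are expected to pair the
 * two calls like this:
 *
 *	struct vmw_ctx_binding_state *cbs = vmw_binding_state_alloc(dev_priv);
 *
 *	if (IS_ERR(cbs))
 *		return PTR_ERR(cbs);
 *	...
 *	vmw_binding_state_free(cbs);	// also releases the TTM accounting
 */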

/**
 * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
 * memory accounting info.
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
 */
void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_private *dev_priv = cbs->dev_priv;

        vfree(cbs);
        ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
}

/**
 * vmw_binding_state_list - Get the binding list of a
 * struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state
 *
 * Returns the binding list which can be used to traverse through the bindings
 * and access the resource information of all bindings.
 */
struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
{
        return &cbs->list;
}

/**
 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
 *
 * Drops all bindings registered in @cbs. No device binding actions are
 * performed.
 */
void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
{
        struct vmw_ctx_bindinfo *entry, *next;

        list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
                vmw_binding_drop(entry);
}

/**
 * vmw_binding_dirtying - Return whether a binding type is dirtying its resource
 * @binding_type: The binding type
 *
 * Each time a resource is put on the validation list as the result of a
 * context binding referencing it, we need to determine whether that resource
 * will be dirtied (written to by the GPU) as a result of the corresponding
 * GPU operation. Currently rendertarget-, depth-stencil-, stream-output-target
 * and unordered access view bindings are capable of dirtying their resource.
 *
 * Return: Whether the binding type dirties the resource its binding points to.
 */
u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
{
        static u32 is_binding_dirtying[vmw_ctx_binding_max] = {
                [vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
                [vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
                [vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
                [vmw_ctx_binding_so_target] = VMW_RES_DIRTY_SET,
                [vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET,
                [vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET,
        };

        /* Review this function as new bindings are added. */
        BUILD_BUG_ON(vmw_ctx_binding_max != 14);
        return is_binding_dirtying[binding_type];
}

/*
 * This function is unused at run-time, and only used to hold various build
 * asserts important for code optimization assumptions.
 */
static void vmw_binding_build_asserts(void)
{
        BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
        BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
        BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));

        /*
         * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
         * view id arrays.
         */
        BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
        BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
        BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);

        /*
         * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
         * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
         */
        BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
                     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
        BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
                     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
}