vmwgfx_kms.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_vblank.h>

#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
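
/**
 * vmw_du_cleanup - Tear down the DRM objects owned by a display unit
 * @du: The display unit.
 *
 * Releases the planes, connector, CRTC and encoder that make up the
 * display unit.
 */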
void vmw_du_cleanup(struct vmw_display_unit *du)
{
	drm_plane_cleanup(&du->primary);
	drm_plane_cleanup(&du->cursor);
	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}


/*
 * Display Unit Cursor functions
 */
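
/*
 * Define a 32-bpp alpha cursor by reserving FIFO space for an
 * SVGA_CMD_DEFINE_ALPHA_CURSOR command followed by the image data, then
 * committing and flushing the command.
 */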
static int vmw_cursor_update_image(struct vmw_private *dev_priv,
				   u32 *image, u32 width, u32 height,
				   u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	memset(cmd, 0, sizeof(*cmd));
	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}
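
/*
 * Map the backing buffer object and push its contents to the device as a
 * new cursor image via vmw_cursor_update_image().
 */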
static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
				struct vmw_buffer_object *bo,
				u32 width, u32 height,
				u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width * height * 4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&bo->base);

	return ret;
}
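
/*
 * Move (or hide) the cursor by writing the cursor FIFO registers and
 * bumping the cursor count, all under the cursor lock.
 */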
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	spin_unlock(&dev_priv->cursor_lock);
}
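
/**
 * vmw_kms_cursor_snoop - Copy a snooped cursor image out of a surface DMA
 * @srf: The surface the DMA command targets.
 * @tfile: Unused.
 * @bo: The guest backing buffer of the DMA.
 * @header: Header of the SVGA3D surface DMA command.
 *
 * Inspects a surface DMA command headed for a cursor surface and, when the
 * command is simple enough to snoop, keeps a copy of the 64x64 cursor image
 * in @srf->snooper.image so the driver can redefine the cursor later.
 */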
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle src and dst not starting at (0, 0, 0) */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64 * 64 * 4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64 * 4) {
		memcpy(srf->snooper.image, virtual, 64 * 64 * 4);
	} else {
		/*
		 * The snooper image is a u32 pointer, so i * 64 advances
		 * one 64-pixel row per iteration.
		 */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}
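
/**
 * vmw_kms_cursor_post_execbuf - Re-emit snooped cursor images
 * @dev_priv: Pointer to the device private struct.
 *
 * After command submission, push any cursor image whose snooper age has
 * changed back to the device so the visible cursor stays current.
 */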
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	drm_plane_cleanup(plane);
}

void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free it */
}

/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer
 * surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}

/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}

/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);

	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	return 0;
}
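
/**
 * vmw_du_cursor_plane_atomic_update - Commit the new cursor state
 * @plane: cursor plane
 * @old_state: the previous plane state
 *
 * Pushes the new cursor image (from either a snooped surface or a buffer
 * object) to the device and updates the cursor position, or hides the
 * cursor when the plane has neither.
 */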
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
	s32 hotspot_x, hotspot_y;
	int ret = 0;

	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (plane->state->fb) {
		hotspot_x += plane->state->fb->hot_x;
		hotspot_y += plane->state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv,
					      vps->surf->snooper.image,
					      64, 64, hotspot_x,
					      hotspot_y);
	} else if (vps->bo) {
		ret = vmw_cursor_update_bo(dev_priv, vps->bo,
					   plane->state->crtc_w,
					   plane->state->crtc_h,
					   hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	if (!ret) {
		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
		du->cursor_y = plane->state->crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);

		du->core_hotspot_x = hotspot_x - du->hotspot_x;
		du->core_hotspot_y = hotspot_y - du->hotspot_y;
	} else {
		DRM_ERROR("Failed to update cursor image\n");
	}
}
/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state = NULL;
	int ret;

	if (state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state->state,
							   state->crtc);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	return ret;
}

/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @new_state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		ret = -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
	}

	return ret;
}
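
/**
 * vmw_du_crtc_atomic_check - Sanity-check a proposed CRTC state
 * @crtc: DRM crtc
 * @new_state: the proposed CRTC state
 *
 * Requires an enabled CRTC to have an active primary plane and only its
 * own connector attached, and supplies a dot clock for the virtual device
 * so timestamping constants can be computed.
 */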
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_state)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
}
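
/**
 * vmw_du_crtc_atomic_flush - Finish a CRTC state update
 * @crtc: DRM crtc
 * @old_crtc_state: the previous CRTC state
 *
 * Signals any pending vblank event directly at flush time, since the
 * virtual device does not generate real vblank interrupts.
 */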
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}
/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}

/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}

/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}
/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}

/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}

/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}

/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);
		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}

/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}
/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */
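
/*
 * Release the references held by a surface-backed framebuffer and free it.
 */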
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};
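
/*
 * Wrap a vmw_surface in a struct vmw_framebuffer_surface after validating
 * that the surface is scanout-capable and matches the requested mode and
 * pixel format.
 */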
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;
	struct drm_format_name_buf format_name;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format,
					      &format_name));
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}
/*
 * Buffer-object framebuffer code
 */

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}
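
/*
 * Legacy dirty handler: normalize the clip list (defaulting to the full
 * framebuffer, and halving it when DRM_MODE_FB_DIRTY_ANNOTATE_COPY passes
 * src/dst pairs), then hand the clips to the legacy display unit.
 */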
static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
				    struct drm_file *file_priv,
				    unsigned int flags, unsigned int color,
				    struct drm_clip_rect *clips,
				    unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
					      clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);

	ttm_read_unlock(&dev_priv->reservation_sem);
	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}
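
/*
 * Route dirty callbacks to the legacy handler on LDU and to the atomic
 * damage helper on all other display units.
 */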
static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
						color, clips, num_clips);

	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
					 clips, num_clips);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = vmw_framebuffer_bo_dirty_ext,
};
/*
 * Pin the buffer in a location suitable for access by the
 * display system.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;
	struct ttm_placement *placement;
	int ret;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->bo) {
			if (dev_priv->capabilities & SVGA_CAP_3D) {
				/*
				 * Use surface DMA to get content to
				 * screen target surface.
				 */
				placement = &vmw_vram_gmr_placement;
			} else {
				/* Use CPU blit. */
				placement = &vmw_sys_placement;
			}
		} else {
			/* Use surface / image update */
			placement = &vmw_mob_placement;
		}

		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}
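
/*
 * Unpin the backing buffer of a framebuffer; the inverse of
 * vmw_framebuffer_pin().
 */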
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_bo_unpin(dev_priv, buf, false);
}
/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer. This way we can do a surface copy rather than a surface
 * DMA. This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_buffer_object *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	struct drm_format_name_buf format_name;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case DRM_FORMAT_C8:
		/* 8-bit palettized; a bare "case 8:" can never match a fourcc. */
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %s\n",
			  drm_get_format_name(mode_cmd->pixel_format,
					      &format_name));
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_bo_unreference(&res->backup);
	res->backup = vmw_bo_reference(bo_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}
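
/*
 * Wrap a vmw_buffer_object in a struct vmw_framebuffer_bo after checking
 * that the object is large enough for the mode and that the pixel format
 * is supported by the active display unit.
 */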
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_buffer_object *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	struct drm_format_name_buf format_name;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
		case DRM_FORMAT_XRGB1555:
		case DRM_FORMAT_RGB565:
			break;
		default:
			DRM_ERROR("Invalid pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format,
						      &format_name));
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}
/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to be less than texture size
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}
/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 *
 * Returns: Pointer to the new framebuffer on success, or an ERR_PTR on
 * failure.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_buffer_object *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 && /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}
/*
 * Generic Kernel modesetting functions
 */
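
/*
 * Create a vmw_framebuffer from a user-space framebuffer request: the
 * handle is looked up as either a buffer object or a surface, and the
 * result is wrapped by vmw_kms_new_framebuffer().
 */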
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_buffer_object *bo = NULL;
	struct ttm_base_object *user_obj;
	int ret;

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */
	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret)
		goto err_out;

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		/* Without an error code, err_out would treat this as success. */
		ret = -EINVAL;
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_bo_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}
/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited
		 * by SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
				(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below prim_bb_mem is vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
	 * limit on primary bounding box
	 */
	if (pixel_mem > dev_priv->prim_bb_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
		if (bb_mem > dev_priv->prim_bb_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}
/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}

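/*
 * Example (illustrative only): with two implicit display units du0 and du1,
 * a state where du0->primary.state->fb and du1->primary.state->fb both point
 * at fbA passes this check, whereas fbA on one unit and fbB on the other
 * fails with -EINVAL, since implicit placement requires a single shared
 * scanout fb.
 */
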
/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success, otherwise a negative error code
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached and it
		 * is not changed, so we don't really need to check
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}

/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check() that additionally
 * verifies the implicit-placement state and, when a modeset is needed,
 * validates the resulting display topology.
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool need_modeset = false;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vmw_kms_check_implicit(dev, state);
	if (ret) {
		VMW_DEBUG_KMS("Invalid implicit state\n");
		return ret;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			need_modeset = true;
	}

	if (need_modeset)
		return vmw_kms_check_topology(dev, state);

	return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL, NULL);
}

int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}

	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);
}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

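/*
 * Illustrative usage only (the mode values are an assumption, not taken
 * from a real caller): programming a 1024x768, 32-bpp, depth-24 mode would
 * look like
 *
 *	ret = vmw_kms_write_svga(vmw_priv, 1024, 768,
 *				 1024 * 4,  // pitch in bytes, 4 bytes/pixel
 *				 32, 24);
 *
 * The pitch is locked first (via register or fifo, depending on
 * capabilities) so the host never sees a size/pitch mismatch.
 */
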
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}

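/*
 * Worked example (illustrative numbers): a 1920x1200 mode at an assumed
 * 4 bytes per pixel has pitch = 1920 * 4 = 7680 bytes, so it needs
 * 7680 * 1200 = 9216000 bytes (~8.8 MiB) of scanout memory. The mode is
 * accepted only if that is below prim_bb_mem (screen targets) or vram_size
 * (all other display units).
 */
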
/**
 * Function called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
{
	return 0;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_crtc *crtc)
{
	return -EINVAL;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_crtc *crtc)
{
}

/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin and generate DRM uevent
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
				unsigned int num_rects, struct drm_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	int ret;

	/* Currently gui_x/y is protected with the crtc mutex */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);
retry:
	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret < 0) {
			if (ret == -EDEADLK) {
				drm_modeset_backoff(&ctx);
				goto retry;
			}
			goto out_fini;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(con, &conn_iter) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			du->pref_width = drm_rect_width(&rects[du->unit]);
			du->pref_height = drm_rect_height(&rects[du->unit]);
			du->pref_active = true;
			du->gui_x = rects[du->unit].x1;
			du->gui_y = rects[du->unit].y1;
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
			du->gui_x = 0;
			du->gui_y = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	drm_sysfs_hotplug_event(dev);
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}

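/*
 * Layout note (derived from the loop above): the SVGA palette is a flat
 * array of byte-sized R, G, B entries starting at SVGA_PALETTE_BASE, so
 * palette entry i occupies registers base + 3*i .. base + 3*i + 2. The
 * 16-bit DRM gamma values are truncated to 8 bits by the ">> 8" shift;
 * e.g. entry 255 with r = 0xffff writes 0xff to SVGA_PALETTE_BASE + 765.
 */
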
int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}

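/*
 * Worked example: for a 1024x768 mode this yields htotal = 1174 and
 * vtotal = 918, so clock = 1174 * 918 / 100 * 6 = 64662 (kHz). The
 * resulting refresh rate, clock * 1000 / (htotal * vtotal), is then
 * 64662000 / 1077732 ~= 60 Hz, which is the intent of the "/ 100 * 6"
 * factor (multiply the pixel count per frame by 60 Hz, in kHz units).
 */
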
int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	max_width  = min(max_width,  dev_priv->texture_max_width);
	max_height = min(max_height, dev_priv->texture_max_height);

	/*
	 * For STDU, a mode is additionally limited by the
	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width  = min(max_width,  dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
					mode->hdisplay * assumed_bpp,
					mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be NULL here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;

		drm_mode_probed_add(connector, mode);
	}

	drm_connector_list_update(connector);
	/* Move the preferred mode first; this helps apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}

/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.
 *
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limits on topology, x + w and y + h (lower right)
 * cannot be greater than INT_MAX, so a topology beyond these limits is
 * rejected with an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	struct drm_rect *drm_rects;
	unsigned rects_size;
	int ret, i;

	if (!arg->num_outputs) {
		struct drm_rect def_rect = {0, 0, 800, 600};

		VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      def_rect.x1, def_rect.y1,
			      def_rect.x2, def_rect.y2);
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	drm_rects = (struct drm_rect *)rects;

	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
	for (i = 0; i < arg->num_outputs; i++) {
		struct drm_vmw_rect curr_rect;

		/* Verify user-space for overflow as the kernel uses drm_rect */
		if ((rects[i].x + rects[i].w > INT_MAX) ||
		    (rects[i].y + rects[i].h > INT_MAX)) {
			ret = -ERANGE;
			goto out_free;
		}

		curr_rect = rects[i];
		drm_rects[i].x1 = curr_rect.x;
		drm_rects[i].y1 = curr_rect.y;
		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
		drm_rects[i].y2 = curr_rect.y + curr_rect.h;

		VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      drm_rects[i].x1, drm_rects[i].y1,
			      drm_rects[i].x2, drm_rects[i].y2);

		/*
		 * Currently this check limits the topology to
		 * mode_config->max (which actually is the max texture size
		 * supported by the virtual device). This limit is here to
		 * address window managers that create a big framebuffer for
		 * the whole topology.
		 */
		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
		    drm_rects[i].x2 > mode_config->max_width ||
		    drm_rects[i].y2 > mode_config->max_height) {
			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
				      drm_rects[i].x1, drm_rects[i].y1,
				      drm_rects[i].x2, drm_rects[i].y2);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

	if (ret == 0)
		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
	kfree(rects);
	return ret;
}

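/*
 * Illustrative conversion (matching the example in the kerneldoc above):
 * a drm_vmw_rect of {x = 640, y = 0, w = 800, h = 600} becomes the
 * drm_rect {x1 = 640, y1 = 0, x2 = 1440, y2 = 600}. The in-place aliasing
 * of @rects as @drm_rects above is safe because both structs are four
 * 32-bit integers and each entry is copied to curr_rect before being
 * rewritten.
 */
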
/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	/* If crtc is passed, no need to iterate over other display units */
	if (dirty->crtc) {
		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
	} else {
		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
				    head) {
			struct drm_plane *plane = crtc->primary;

			if (plane->state->fb == &framebuffer->base)
				units[num_units++] = vmw_crtc_to_du(crtc);
		}
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd)
				return -ENOMEM;

			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}

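/*
 * Worked coordinate example (illustrative numbers): with a crtc at
 * crtc_x = 640, crtc_y = 0 and dest_x = dest_y = 0, a framebuffer clip of
 * x1 = 700, y1 = 10, x2 = 800, y2 = 50 translates to unit coordinates
 * unit_x1 = 60, unit_y1 = 10, unit_x2 = 160, unit_y2 = 50 before the
 * clamping steps above. Clips that extend left of or above the crtc get
 * their negative left/top clipped off, with fb_x/fb_y advanced by the same
 * amount so the source stays in sync with the destination.
 */
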
/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
				      struct drm_file *file_priv,
				      struct vmw_validation_context *ctx,
				      struct vmw_fence_obj **out_fence,
				      struct drm_vmw_fence_rep __user *
				      user_fence_rep)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle = 0;
	int ret = 0;

	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
	    out_fence)
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
						 file_priv ? &handle : NULL);
	vmw_validation_done(ctx, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle, -1);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);
}

/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd)
		return -ENOMEM;

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_fifo_commit(dev_priv, copy_size);

	return 0;
}

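/*
 * Box encoding note (derived from the loop above): each clip rect is turned
 * into an SVGA3dBox with origin (x1, y1) and extent (x2 - x1, y2 - y1), so
 * e.g. a clip of x1 = 16, y1 = 32, x2 = 48, y2 = 64 becomes a 32x32 box at
 * (16, 32) with z = 0, d = 1 (a single 2D slice). One UPDATE_GB_IMAGE
 * command is emitted per clip and the whole batch is committed in one go.
 */
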
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;
	int ret = 0;

	mutex_lock(&dev_priv->dev->mode_config.mutex);
	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (&con->head == &dev_priv->dev->mode_config.connector_list) {
		DRM_ERROR("Could not find initial display unit.\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (&mode->head == &con->modes) {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	} else {
		*p_mode = mode;
	}

out_unlock:
	mutex_unlock(&dev_priv->dev->mode_config.mutex);

	return ret;
}

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "implicit_placement", 0, 1);
}

/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */
int vmw_kms_suspend(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);

	if (IS_ERR(dev_priv->suspend_state)) {
		int ret = PTR_ERR(dev_priv->suspend_state);

		DRM_ERROR("Failed kms suspend: %d\n", ret);
		dev_priv->suspend_state = NULL;

		return ret;
	}

	return 0;
}

/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */
int vmw_kms_resume(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	if (WARN_ON(!dev_priv->suspend_state))
		return 0;

	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
	dev_priv->suspend_state = NULL;

	return ret;
}

/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
	drm_atomic_helper_shutdown(dev);
}

/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 * @update: The closure structure.
 *
 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
 * update on display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
	struct drm_plane_state *state = update->plane->state;
	struct drm_plane_state *old_state = update->old_state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	struct drm_rect bb;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	uint32_t reserved_size = 0;
	uint32_t submit_size = 0;
	uint32_t curr_size = 0;
	uint32_t num_hits = 0;
	void *cmd_start;
	char *cmd_next;
	int ret;

	/*
	 * Iterate in advance to check whether a plane update is really
	 * needed, and to find the number of clips that actually fall in the
	 * plane src, for fifo allocation.
	 */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;

	if (num_hits == 0)
		return 0;

	if (update->vfb->bo) {
		struct vmw_framebuffer_bo *vfbbo =
			container_of(update->vfb, typeof(*vfbbo), base);

		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
					    update->cpu_blit);
	} else {
		struct vmw_framebuffer_surface *vfbs =
			container_of(update->vfb, typeof(*vfbs), base);

		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
						  0, VMW_RES_DIRTY_NONE, NULL,
						  NULL);
	}

	if (ret)
		return ret;

	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
	if (ret)
		goto out_unref;

	reserved_size = update->calc_fifo_size(update, num_hits);
	cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
	if (!cmd_start) {
		ret = -ENOMEM;
		goto out_revert;
	}

	cmd_next = cmd_start;

	if (update->post_prepare) {
		curr_size = update->post_prepare(update, cmd_next);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	if (update->pre_clip) {
		curr_size = update->pre_clip(update, cmd_next, num_hits);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	bb.x1 = INT_MAX;
	bb.y1 = INT_MAX;
	bb.x2 = INT_MIN;
	bb.y2 = INT_MIN;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		uint32_t fb_x = clip.x1;
		uint32_t fb_y = clip.y1;

		vmw_du_translate_to_crtc(state, &clip);
		if (update->clip) {
			curr_size = update->clip(update, cmd_next, &clip, fb_x,
						 fb_y);
			cmd_next += curr_size;
			submit_size += curr_size;
		}
		bb.x1 = min_t(int, bb.x1, clip.x1);
		bb.y1 = min_t(int, bb.y1, clip.y1);
		bb.x2 = max_t(int, bb.x2, clip.x2);
		bb.y2 = max_t(int, bb.y2, clip.y2);
	}

	curr_size = update->post_clip(update, cmd_next, &bb);
	submit_size += curr_size;

	if (reserved_size < submit_size)
		submit_size = 0;

	vmw_fifo_commit(update->dev_priv, submit_size);

	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
					 update->out_fence, NULL);

	return ret;

out_revert:
	vmw_validation_revert(&val_ctx);
out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;
}

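/*
 * Usage sketch (illustrative only; the concrete callbacks live in the
 * per-display-unit code, and the my_du_* names below are placeholders):
 *
 *	struct vmw_du_update_plane update = {
 *		.plane = plane,
 *		.old_state = old_state,
 *		.dev_priv = dev_priv,
 *		.vfb = vmw_framebuffer_to_vfb(plane->state->fb),
 *		.calc_fifo_size = my_du_calc_fifo_size, // bytes for num_hits clips
 *		.pre_clip = my_du_pre_clip,             // optional command prologue
 *		.clip = my_du_clip,                     // one command per damage clip
 *		.post_clip = my_du_post_clip,           // epilogue, gets bounding box
 *	};
 *
 *	ret = vmw_du_helper_plane_update(&update);
 *
 * The helper reserves calc_fifo_size() bytes up front, lets each callback
 * append to the reserved space, and commits only the bytes actually written.
 */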