GrVkGpu.cpp

/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrContextOptions.h"
#include "include/private/SkTo.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkMipMap.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrGeometryProcessor.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrMesh.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/SkGpuDevice.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkGpuCommandBuffer.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkIndexBuffer.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineState.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkTexture.h"
#include "src/gpu/vk/GrVkTextureRenderTarget.h"
#include "src/gpu/vk/GrVkTransferBuffer.h"
#include "src/gpu/vk/GrVkVertexBuffer.h"
#include "src/image/SkImage_Gpu.h"
#include "src/image/SkSurface_Gpu.h"
#include "src/sksl/SkSLCompiler.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "include/gpu/vk/GrVkTypes.h"

#include <utility>

#if !defined(SK_BUILD_FOR_WIN)
#include <unistd.h>
#endif // !defined(SK_BUILD_FOR_WIN)

#if defined(SK_BUILD_FOR_WIN) && defined(SK_DEBUG)
#include "src/core/SkLeanWindows.h"
#endif

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
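
// Factory for GrVkGpu. Validates the client-supplied handles in the
// GrVkBackendContext, clamps the instance and physical-device versions to the
// requested max API version, builds a GrVkInterface from fGetProc (using the
// client's GrVkExtensions when provided), and rejects protected contexts on
// devices without protected-memory support.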
sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
                           const GrContextOptions& options, GrContext* context) {
    if (backendContext.fInstance == VK_NULL_HANDLE ||
        backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
        backendContext.fDevice == VK_NULL_HANDLE ||
        backendContext.fQueue == VK_NULL_HANDLE) {
        return nullptr;
    }
    if (!backendContext.fGetProc) {
        return nullptr;
    }

    PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
            reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
                    backendContext.fGetProc("vkEnumerateInstanceVersion",
                                            VK_NULL_HANDLE, VK_NULL_HANDLE));
    uint32_t instanceVersion = 0;
    if (!localEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        VkResult err = localEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
            return nullptr;
        }
    }

    PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
            reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
                    backendContext.fGetProc("vkGetPhysicalDeviceProperties",
                                            backendContext.fInstance,
                                            VK_NULL_HANDLE));
    if (!localGetPhysicalDeviceProperties) {
        return nullptr;
    }
    VkPhysicalDeviceProperties physDeviceProperties;
    localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
    uint32_t physDevVersion = physDeviceProperties.apiVersion;

    uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
                                                        : instanceVersion;

    instanceVersion = SkTMin(instanceVersion, apiVersion);
    physDevVersion = SkTMin(physDevVersion, apiVersion);

    sk_sp<const GrVkInterface> interface;

    if (backendContext.fVkExtensions) {
        interface.reset(new GrVkInterface(backendContext.fGetProc,
                                          backendContext.fInstance,
                                          backendContext.fDevice,
                                          instanceVersion,
                                          physDevVersion,
                                          backendContext.fVkExtensions));
        if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
            return nullptr;
        }
    } else {
        GrVkExtensions extensions;
        // The only extension flag that may affect the vulkan backend is the swapchain extension.
        // We need to know if this is enabled to know if we can transition to a present layout
        // when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        interface.reset(new GrVkInterface(backendContext.fGetProc,
                                          backendContext.fInstance,
                                          backendContext.fDevice,
                                          instanceVersion,
                                          physDevVersion,
                                          &extensions));
        if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
            return nullptr;
        }
    }

    sk_sp<GrVkGpu> vkGpu(new GrVkGpu(context, options, backendContext, interface,
                                     instanceVersion, physDevVersion));
    if (backendContext.fProtectedContext == GrProtected::kYes &&
        !vkGpu->vkCaps().supportsProtectedMemory()) {
        return nullptr;
    }
    return std::move(vkGpu);
}

////////////////////////////////////////////////////////////////////////////////
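
// The constructor adopts the handles from the backend context (the client
// must retain ownership of the instance and device, per the assert below),
// builds GrVkCaps from whichever feature description was supplied, and begins
// the first primary command buffer so the gpu is ready to record work.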
GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 const GrVkBackendContext& backendContext, sk_sp<const GrVkInterface> interface,
                 uint32_t instanceVersion, uint32_t physicalDeviceVersion)
        : INHERITED(context)
        , fInterface(std::move(interface))
        , fMemoryAllocator(backendContext.fMemoryAllocator)
        , fInstance(backendContext.fInstance)
        , fPhysicalDevice(backendContext.fPhysicalDevice)
        , fDevice(backendContext.fDevice)
        , fQueue(backendContext.fQueue)
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fResourceProvider(this)
        , fDisconnected(false)
        , fProtectedContext(backendContext.fProtectedContext) {
    SkASSERT(!backendContext.fOwnsInstanceAndDevice);

    if (!fMemoryAllocator) {
        // We were not given a memory allocator at creation
        fMemoryAllocator.reset(new GrVkAMDMemoryAllocator(backendContext.fPhysicalDevice,
                                                          fDevice, fInterface));
    }

    fCompiler = new SkSL::Compiler();

    if (backendContext.fDeviceFeatures2) {
        fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
                                   *backendContext.fDeviceFeatures2, instanceVersion,
                                   physicalDeviceVersion,
                                   *backendContext.fVkExtensions, fProtectedContext));
    } else if (backendContext.fDeviceFeatures) {
        VkPhysicalDeviceFeatures2 features2;
        features2.pNext = nullptr;
        features2.features = *backendContext.fDeviceFeatures;
        fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
                                   features2, instanceVersion, physicalDeviceVersion,
                                   *backendContext.fVkExtensions, fProtectedContext));
    } else {
        VkPhysicalDeviceFeatures2 features;
        memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
        features.pNext = nullptr;
        if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
            features.features.geometryShader = true;
        }
        if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
            features.features.dualSrcBlend = true;
        }
        if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
            features.features.sampleRateShading = true;
        }
        GrVkExtensions extensions;
        // The only extension flag that may affect the vulkan backend is the swapchain extension.
        // We need to know if this is enabled to know if we can transition to a present layout
        // when flushing a surface.
        if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
            const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            extensions.init(backendContext.fGetProc, backendContext.fInstance,
                            backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
        }
        fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
                                   features, instanceVersion, physicalDeviceVersion, extensions,
                                   fProtectedContext));
    }
    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
    VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));

    fResourceProvider.init();

    fCmdPool = fResourceProvider.findOrCreateCommandPool();
    fCurrentCmdBuffer = fCmdPool->getPrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);
}
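
// Tears down everything this gpu owns: ends and closes the active command
// pool, waits for the queue to drain, unrefs pending semaphores, destroys the
// resource provider's resources, and finally drops the raw Vulkan handles.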
void GrVkGpu::destroyResources() {
    if (fCmdPool) {
        fCmdPool->getPrimaryCommandBuffer()->end(this);
        fCmdPool->close();
    }

    // wait for all commands to finish
    VkResult res = VK_CALL(QueueWaitIdle(fQueue));

    // On Windows, sometimes calls to QueueWaitIdle return before actually signalling the fences
    // on the command buffers even though they have completed. This causes an assert to fire when
    // destroying the command buffers. Currently this only seems to happen on Windows, so we add a
    // sleep to make sure the fence signals.
#ifdef SK_DEBUG
    if (this->vkCaps().mustSleepOnTearDown()) {
#if defined(SK_BUILD_FOR_WIN)
        Sleep(10); // In milliseconds
#else
        sleep(1);  // In seconds
#endif
    }
#endif

#ifdef SK_DEBUG
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);
#endif

    if (fCmdPool) {
        fCmdPool->unref(this);
        fCmdPool = nullptr;
    }

    for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
        fSemaphoresToWaitOn[i]->unref(this);
    }
    fSemaphoresToWaitOn.reset();

    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref(this);
    }
    fSemaphoresToSignal.reset();

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources(VK_ERROR_DEVICE_LOST == res);

    fMemoryAllocator.reset();

    fQueue = VK_NULL_HANDLE;
    fDevice = VK_NULL_HANDLE;
    fInstance = VK_NULL_HANDLE;
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    delete fCompiler;
}

void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        if (DisconnectType::kCleanup == type) {
            this->destroyResources();
        } else {
            if (fCmdPool) {
                fCmdPool->unrefAndAbandon();
                fCmdPool = nullptr;
            }
            for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
                fSemaphoresToWaitOn[i]->unrefAndAbandon();
            }
            for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
                fSemaphoresToSignal[i]->unrefAndAbandon();
            }

            // must call this just before we destroy the command pool and VkDevice
            fResourceProvider.abandonResources();

            fMemoryAllocator.reset();
        }
        fSemaphoresToWaitOn.reset();
        fSemaphoresToSignal.reset();
        fCurrentCmdBuffer = nullptr;
        fDisconnected = true;
    }
}

///////////////////////////////////////////////////////////////////////////////
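
// Both getCommandBuffer() overloads reuse one cached command buffer wrapper
// per target type, re-pointing it at the new target instead of allocating a
// fresh object for every pass.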
GrGpuRTCommandBuffer* GrVkGpu::getCommandBuffer(
        GrRenderTarget* rt, GrSurfaceOrigin origin, const SkRect& bounds,
        const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
        const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
    if (!fCachedRTCommandBuffer) {
        fCachedRTCommandBuffer.reset(new GrVkGpuRTCommandBuffer(this));
    }

    fCachedRTCommandBuffer->set(rt, origin, colorInfo, stencilInfo);
    return fCachedRTCommandBuffer.get();
}

GrGpuTextureCommandBuffer* GrVkGpu::getCommandBuffer(GrTexture* texture, GrSurfaceOrigin origin) {
    if (!fCachedTexCommandBuffer) {
        fCachedTexCommandBuffer.reset(new GrVkGpuTextureCommandBuffer(this));
    }

    fCachedTexCommandBuffer->set(texture, origin);
    return fCachedTexCommandBuffer.get();
}
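
// Submits the current primary command buffer to the queue. If nothing was
// recorded and no semaphores are pending, the submit is skipped (unless a
// sync was forced); otherwise the pool is closed, the buffer submitted, and a
// fresh pool/buffer pair begun for subsequent work.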
void GrVkGpu::submitCommandBuffer(SyncQueue sync, GrGpuFinishedProc finishedProc,
                                  GrGpuFinishedContext finishedContext) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(fCurrentCmdBuffer);
    SkASSERT(!fCachedRTCommandBuffer || !fCachedRTCommandBuffer->isActive());
    SkASSERT(!fCachedTexCommandBuffer || !fCachedTexCommandBuffer->isActive());

    if (!fCurrentCmdBuffer->hasWork() && kForce_SyncQueue != sync &&
        !fSemaphoresToSignal.count() && !fSemaphoresToWaitOn.count()) {
        SkASSERT(fDrawables.empty());
        fResourceProvider.checkCommandBuffers();
        if (finishedProc) {
            fResourceProvider.addFinishedProcToActiveCommandBuffers(finishedProc, finishedContext);
        }
        return;
    }

    fCurrentCmdBuffer->end(this);
    fCmdPool->close();
    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync, fSemaphoresToSignal, fSemaphoresToWaitOn);

    if (finishedProc) {
        // Make sure this is called after closing the current command pool
        fResourceProvider.addFinishedProcToActiveCommandBuffers(finishedProc, finishedContext);
    }

    // We must delete any drawables that have been waiting until submit for us to destroy.
    fDrawables.reset();

    for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
        fSemaphoresToWaitOn[i]->unref(this);
    }
    fSemaphoresToWaitOn.reset();
    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref(this);
    }
    fSemaphoresToSignal.reset();

    // Release old command pool and create a new one
    fCmdPool->unref(this);
    fResourceProvider.checkCommandBuffers();
    fCmdPool = fResourceProvider.findOrCreateCommandPool();
    fCurrentCmdBuffer = fCmdPool->getPrimaryCommandBuffer();
    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
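
// Buffer factory. Vertex and index buffers honor the dynamic/static access
// pattern; transfer buffers map to the copy-read (CPU-to-GPU) and copy-write
// (GPU-to-CPU) GrVkBuffer types.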
sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
                                           GrAccessPattern accessPattern, const void* data) {
    sk_sp<GrGpuBuffer> buff;
    switch (type) {
        case GrGpuBufferType::kVertex:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkVertexBuffer::Make(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case GrGpuBufferType::kIndex:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkIndexBuffer::Make(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyRead_Type);
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyWrite_Type);
            break;
        default:
            SK_ABORT("Unknown buffer type.");
            return nullptr;
    }
    if (data && buff) {
        buff->updateData(data, size);
    }
    return buff;
}
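
// CPU-to-texture upload entry point. Linearly tiled images are written
// through a host mapping (base level only, after a transition to the GENERAL
// layout if needed); optimally tiled images go through the staging-buffer
// path in uploadTexDataOptimal, which also handles full mip chains.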
bool GrVkGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height,
                            GrColorType srcColorType, const GrMipLevel texels[],
                            int mipLevelCount) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));
    bool success = false;
    bool linearTiling = vkTex->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  VK_ACCESS_HOST_WRITE_BIT,
                                  VK_PIPELINE_STAGE_HOST_BIT,
                                  false);
            this->submitCommandBuffer(kForce_SyncQueue);
        }
        success = this->uploadTexDataLinear(vkTex, left, top, width, height, srcColorType,
                                            texels[0].fPixels, texels[0].fRowBytes);
    } else {
        SkASSERT(mipLevelCount <= vkTex->texturePriv().maxMipMapLevel() + 1);
        success = this->uploadTexDataOptimal(vkTex, left, top, width, height, srcColorType, texels,
                                             mipLevelCount);
    }

    return success;
}
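
// Transfer-buffer-to-texture copy: records a single copyBufferToImage after
// transitioning the texture to TRANSFER_DST_OPTIMAL. The caller's rowBytes is
// converted to a texel count for bufferRowLength.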
bool GrVkGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                                 GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
                                 size_t bufferOffset, size_t rowBytes) {
    // Vulkan only supports 4-byte aligned offsets
    if (SkToBool(bufferOffset & 0x3)) {
        return false;
    }
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
    if (!vkTex) {
        return false;
    }

    // Can't transfer compressed data
    SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));

    GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
    if (!vkBuffer) {
        return false;
    }

    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
        SkASSERT(bounds.contains(subRect));
    )
    int bpp = GrColorTypeBytesPerPixel(bufferColorType);

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { left, top, 0 };
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    // Change layout of our target so it can be copied to
    vkTex->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    // Copy the buffer to the image
    fCurrentCmdBuffer->copyBufferToImage(this,
                                         vkBuffer,
                                         vkTex,
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                         1,
                                         &region);

    vkTex->texturePriv().markMipMapsDirty();
    return true;
}
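
// Texture/render-target-to-transfer-buffer copy: resolves an MSAA source if
// necessary, records copyImageToBuffer, then adds a buffer memory barrier so
// the host does not read the buffer before the transfer completes.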
bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                                   GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
                                   size_t offset) {
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    if (fProtectedContext == GrProtected::kYes) {
        return false;
    }

    GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);

    GrVkImage* srcImage;
    if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }

        // resolve the render target if necessary
        switch (rt->getResolveType()) {
            case GrVkRenderTarget::kCantResolve_ResolveType:
                return false;
            case GrVkRenderTarget::kAutoResolves_ResolveType:
                break;
            case GrVkRenderTarget::kCanResolve_ResolveType:
                this->resolveRenderTargetNoFlush(rt);
                break;
            default:
                SK_ABORT("Unknown resolve type");
        }
        srcImage = rt;
    } else {
        srcImage = static_cast<GrVkTexture*>(surface->asTexture());
    }

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = offset;
    region.bufferRowLength = width;
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { left, top, 0 };
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    fCurrentCmdBuffer->copyImageToBuffer(this, srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         vkBuffer, 1, &region);

    // Make sure the copy to buffer has finished.
    vkBuffer->addMemoryBarrier(this,
                               VK_ACCESS_TRANSFER_WRITE_BIT,
                               VK_ACCESS_HOST_READ_BIT,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_HOST_BIT,
                               false);
    return true;
}
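
// Records a resolve from the render target's MSAA image into the
// destination's single-sample image, transitioning both images into the
// appropriate transfer layouts first.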
void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    SkASSERT(dst);
    SkASSERT(src && src->numSamples() > 1 && src->msaaImage());

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
    resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
    resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        dstImage = vkRT;
    } else {
        SkASSERT(dst->asTexture());
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    }
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->msaaImage()->setImageLayout(this,
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     VK_ACCESS_TRANSFER_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     false);

    fCurrentCmdBuffer->resolveImage(this, *src->msaaImage(), *dstImage, 1, &resolveInfo);
}

void GrVkGpu::internalResolveRenderTarget(GrRenderTarget* target, bool requiresSubmit) {
    if (target->needsResolve()) {
        SkASSERT(target->numSamples() > 1);
        GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
        SkASSERT(rt->msaaImage());

        const SkIRect& srcRect = rt->getResolveRect();

        this->resolveImage(target, rt, srcRect, SkIPoint::Make(srcRect.fLeft, srcRect.fTop));

        rt->flagAsResolved();

        if (requiresSubmit) {
            this->submitCommandBuffer(kSkip_SyncQueue);
        }
    }
}
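
// Host-visible upload path for linearly tiled images: queries the subresource
// layout, maps the image's memory, copies row by row honoring rowPitch, then
// flushes and unmaps.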
bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex, int left, int top, int width, int height,
                                  GrColorType dataColorType, const void* data, size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(tex->isLinearTiled());

    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(tex->width(), tex->height());
        SkASSERT(bounds.contains(subRect));
    )
    int bpp = GrColorTypeBytesPerPixel(dataColorType);
    size_t trimRowBytes = width * bpp;

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;

    const GrVkInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    tex->image(),
                                                    &subres,
                                                    &layout));

    const GrVkAlloc& alloc = tex->alloc();
    if (VK_NULL_HANDLE == alloc.fMemory) {
        return false;
    }
    VkDeviceSize offset = top * layout.rowPitch + left * bpp;
    VkDeviceSize size = height*layout.rowPitch;
    SkASSERT(size + offset <= alloc.fSize);
    void* mapPtr = GrVkMemory::MapAlloc(this, alloc);
    if (!mapPtr) {
        return false;
    }
    mapPtr = reinterpret_cast<char*>(mapPtr) + offset;

    SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, trimRowBytes,
                 height);

    GrVkMemory::FlushMappedAlloc(this, alloc, offset, size);
    GrVkMemory::UnmapAlloc(this, alloc);

    return true;
}
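
// Staging upload path for optimally tiled images. All requested mip levels
// are packed into one transfer buffer (each level's offset aligned to 4 bytes
// and to the pixel size), then a single copyBufferToImage is recorded with
// one region per level. RGB_888x data bound for a VK_FORMAT_R8G8B8_UNORM
// image takes a detour through a temporary RGBA_8888 texture that is then
// copied into the destination.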
bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int width, int height,
                                   GrColorType dataColorType, const GrMipLevel texels[],
                                   int mipLevelCount) {
    SkASSERT(!tex->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(1 == mipLevelCount ||
             (0 == left && 0 == top && width == tex->width() && height == tex->height()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->texturePriv().maxMipMapLevel() + 1));

    if (width == 0 || height == 0) {
        return false;
    }

    if (GrPixelConfigToColorType(tex->config()) != dataColorType) {
        return false;
    }

    // For RGB_888x src data we upload it first to an RGBA texture and then copy it to the dst
    // RGB texture, so we do not upload mip levels in that case.
    if (dataColorType == GrColorType::kRGB_888x && tex->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
        SkASSERT(tex->config() == kRGB_888_GrPixelConfig);
        // First check that we'll be able to do the copy to the R8G8B8 image in the end via a
        // blit or draw.
        if (!this->vkCaps().formatCanBeDstofBlit(VK_FORMAT_R8G8B8_UNORM, tex->isLinearTiled()) &&
            !this->vkCaps().maxRenderTargetSampleCount(VK_FORMAT_R8G8B8_UNORM)) {
            return false;
        }
        mipLevelCount = 1;
    }

    SkASSERT(this->vkCaps().isVkFormatTexturable(tex->imageFormat()));
    int bpp = GrColorTypeBytesPerPixel(dataColorType);

    // texels is const.
    // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
    // Because of this we need to make a non-const shallow copy of texels.
    SkAutoTMalloc<GrMipLevel> texelsShallowCopy;

    texelsShallowCopy.reset(mipLevelCount);
    memcpy(texelsShallowCopy.get(), texels, mipLevelCount*sizeof(GrMipLevel));

    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    individualMipOffsets.push_back(0);
    size_t combinedBufferSize = width * bpp * height;
    int currentWidth = width;
    int currentHeight = height;
    if (!texelsShallowCopy[0].fPixels) {
        combinedBufferSize = 0;
    }

    // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
    // config. This works with the assumption that the bytes per pixel of the config are always a
    // power of 2.
    SkASSERT((bpp & (bpp - 1)) == 0);
    const size_t alignmentMask = 0x3 | (bpp - 1);
    for (int currentMipLevel = 1; currentMipLevel < mipLevelCount; currentMipLevel++) {
        currentWidth = SkTMax(1, currentWidth/2);
        currentHeight = SkTMax(1, currentHeight/2);

        if (texelsShallowCopy[currentMipLevel].fPixels) {
            const size_t trimmedSize = currentWidth * bpp * currentHeight;
            const size_t alignmentDiff = combinedBufferSize & alignmentMask;
            if (alignmentDiff != 0) {
                combinedBufferSize += alignmentMask - alignmentDiff + 1;
            }
            individualMipOffsets.push_back(combinedBufferSize);
            combinedBufferSize += trimmedSize;
        } else {
            individualMipOffsets.push_back(0);
        }
    }
    if (0 == combinedBufferSize) {
        // We don't actually have any data to upload so just return success
        return true;
    }

    // allocate buffer to hold our mip data
    sk_sp<GrVkTransferBuffer> transferBuffer =
            GrVkTransferBuffer::Make(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);
    if (!transferBuffer) {
        return false;
    }

    int uploadLeft = left;
    int uploadTop = top;
    GrVkTexture* uploadTexture = tex;
    // For uploading RGB_888x data to an R8G8B8_UNORM texture we must first upload the data to an
    // R8G8B8A8_UNORM image and then copy it.
    sk_sp<GrVkTexture> copyTexture;
    if (dataColorType == GrColorType::kRGB_888x && tex->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
        bool dstHasYcbcr = tex->ycbcrConversionInfo().isValid();
        if (!this->vkCaps().canCopyAsBlit(tex->config(), 1, false, dstHasYcbcr,
                                          kRGBA_8888_GrPixelConfig, 1, false, false)) {
            return false;
        }
        GrSurfaceDesc surfDesc;
        surfDesc.fWidth = width;
        surfDesc.fHeight = height;
        surfDesc.fConfig = kRGBA_8888_GrPixelConfig;

        VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT |
                                       VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                       VK_IMAGE_USAGE_TRANSFER_DST_BIT;

        GrVkImage::ImageDesc imageDesc;
        imageDesc.fImageType = VK_IMAGE_TYPE_2D;
        imageDesc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
        imageDesc.fWidth = width;
        imageDesc.fHeight = height;
        imageDesc.fLevels = 1;
        imageDesc.fSamples = 1;
        imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        imageDesc.fUsageFlags = usageFlags;
        imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

        copyTexture = GrVkTexture::MakeNewTexture(this, SkBudgeted::kYes, surfDesc, imageDesc,
                                                  GrMipMapsStatus::kNotAllocated);
        if (!copyTexture) {
            return false;
        }

        uploadTexture = copyTexture.get();
        uploadLeft = 0;
        uploadTop = 0;
    }

    char* buffer = (char*) transferBuffer->map();
    SkTArray<VkBufferImageCopy> regions(mipLevelCount);

    currentWidth = width;
    currentHeight = height;
    int layerHeight = uploadTexture->height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        if (texelsShallowCopy[currentMipLevel].fPixels) {
            SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
            const size_t trimRowBytes = currentWidth * bpp;
            const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;

            // copy data into the buffer, skipping the trailing bytes
            char* dst = buffer + individualMipOffsets[currentMipLevel];
            const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);

            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = transferBuffer->offset() + individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
            region.imageOffset = {uploadLeft, uploadTop, 0};
            region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
        }
        currentWidth = SkTMax(1, currentWidth/2);
        currentHeight = SkTMax(1, currentHeight/2);
        layerHeight = currentHeight;
    }

    // no need to flush non-coherent memory, unmap will do that for us
    transferBuffer->unmap();

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image
    fCurrentCmdBuffer->copyBufferToImage(this,
                                         transferBuffer.get(),
                                         uploadTexture,
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                         regions.count(),
                                         regions.begin());

    // If we copied the data into a temporary image first, copy that image into our main texture
    // now.
    if (copyTexture.get()) {
        SkASSERT(dataColorType == GrColorType::kRGB_888x);
        SkAssertResult(this->copySurface(tex, copyTexture.get(), SkIRect::MakeWH(width, height),
                                         SkIPoint::Make(left, top), false));
    }
    if (1 == mipLevelCount) {
        tex->texturePriv().markMipMapsDirty();
    }

    return true;
}

// It's probably possible to roll this into uploadTexDataOptimal,
// but for now it's easier to maintain as a separate entity.
bool GrVkGpu::uploadTexDataCompressed(GrVkTexture* tex, int left, int top, int width, int height,
                                      SkImage::CompressionType compressionType, const void* data) {
    SkASSERT(data);
    SkASSERT(!tex->isLinearTiled());
    // For now the assumption is that our rect is the entire texture.
    // Compressed textures are read-only so this should be a reasonable assumption.
    SkASSERT(0 == left && 0 == top && width == tex->width() && height == tex->height());

    if (width == 0 || height == 0) {
        return false;
    }

    SkImage::CompressionType textureCompressionType;
    if (!GrVkFormatToCompressionType(tex->imageFormat(), &textureCompressionType) ||
        textureCompressionType != compressionType) {
        return false;
    }

    SkASSERT(this->vkCaps().isVkFormatTexturable(tex->imageFormat()));

    size_t dataSize = GrCompressedDataSize(compressionType, width, height);

    // allocate buffer to hold our mip data
    sk_sp<GrVkTransferBuffer> transferBuffer =
            GrVkTransferBuffer::Make(this, dataSize, GrVkBuffer::kCopyRead_Type);
    if (!transferBuffer) {
        return false;
    }

    int uploadLeft = left;
    int uploadTop = top;
    GrVkTexture* uploadTexture = tex;

    char* buffer = (char*)transferBuffer->map();

    memcpy(buffer, data, dataSize);

    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = transferBuffer->offset();
    region.bufferRowLength = width;
    region.bufferImageHeight = height;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { uploadLeft, uploadTop, 0 };
    region.imageExtent = { SkToU32(width), SkToU32(height), 1 };

    // no need to flush non-coherent memory, unmap will do that for us
    transferBuffer->unmap();

    // Change layout of our target so it can be copied to
    uploadTexture->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT,
                                  false);

    // Copy the buffer to the image
    fCurrentCmdBuffer->copyBufferToImage(this,
                                         transferBuffer.get(),
                                         uploadTexture,
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                         1,
                                         &region);

    return true;
}

////////////////////////////////////////////////////////////////////////////////
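
// Creates a new optimally tiled texture (renderable if requested), uploads
// any provided mip levels, and, when the caps ask for initialized textures,
// zero-clears every level the upload did not cover.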
sk_sp<GrTexture> GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrRenderable renderable,
                                          int renderTargetSampleCnt, SkBudgeted budgeted,
                                          GrProtected isProtected, const GrMipLevel texels[],
                                          int mipLevelCount) {
    VkFormat pixelFormat;
    SkAssertResult(GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat));

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderable == GrRenderable::kYes) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not
    // this texture will be used in some copy. Also this assumes, as is the current case, that
    // all render targets in vulkan are also textures. If we change this practice of setting both
    // bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    int mipLevels = !mipLevelCount ? 1 : mipLevelCount;
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fIsProtected = isProtected;

    GrMipMapsStatus mipMapsStatus = GrMipMapsStatus::kNotAllocated;
    if (mipLevels > 1) {
        mipMapsStatus = GrMipMapsStatus::kValid;
        for (int i = 0; i < mipLevels; ++i) {
            if (!texels[i].fPixels) {
                mipMapsStatus = GrMipMapsStatus::kDirty;
                break;
            }
        }
    }

    sk_sp<GrVkTexture> tex;
    if (renderable == GrRenderable::kYes) {
        tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, budgeted, desc, renderTargetSampleCnt, imageDesc, mipMapsStatus);
    } else {
        tex = GrVkTexture::MakeNewTexture(this, budgeted, desc, imageDesc, mipMapsStatus);
    }

    if (!tex) {
        return nullptr;
    }

    auto colorType = GrPixelConfigToColorType(desc.fConfig);
    if (mipLevelCount) {
        if (!this->uploadTexDataOptimal(tex.get(), 0, 0, desc.fWidth, desc.fHeight, colorType,
                                        texels, mipLevelCount)) {
            tex->unref();
            return nullptr;
        }
    }

    if (this->caps()->shouldInitializeTextures()) {
        SkSTArray<1, VkImageSubresourceRange> ranges;
        bool inRange = false;
        for (uint32_t i = 0; i < tex->mipLevels(); ++i) {
            if (i >= static_cast<uint32_t>(mipLevelCount) || !texels[i].fPixels) {
                if (inRange) {
                    ranges.back().levelCount++;
                } else {
                    auto& range = ranges.push_back();
                    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                    range.baseArrayLayer = 0;
                    range.baseMipLevel = i;
                    range.layerCount = 1;
                    range.levelCount = 1;
                    inRange = true;
                }
            } else if (inRange) {
                inRange = false;
            }
        }

        if (!ranges.empty()) {
            static constexpr VkClearColorValue kZeroClearColor = {};
            tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                false);
            this->currentCommandBuffer()->clearColorImage(this, tex.get(), &kZeroClearColor,
                                                          ranges.count(), ranges.begin());
        }
    }
    return std::move(tex);
}

sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(int width, int height,
                                                    SkImage::CompressionType compressionType,
                                                    SkBudgeted budgeted, const void* data) {
    GrBackendFormat format = this->caps()->getBackendFormatFromCompressionType(compressionType);
    if (!format.getVkFormat()) {
        return nullptr;
    }
    VkFormat pixelFormat = *format.getVkFormat();

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not
    // this texture will be used in some copy. Also this assumes, as is the current case, that
    // all render targets in vulkan are also textures. If we change this practice of setting both
    // bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // Compressed textures with MIP levels or multiple samples are not supported as of now.
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = width;
    imageDesc.fHeight = height;
    imageDesc.fLevels = 1;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fIsProtected = GrProtected::kNo;

    GrSurfaceDesc desc;
    desc.fConfig = GrCompressionTypePixelConfig(compressionType);
    desc.fWidth = width;
    desc.fHeight = height;
    auto tex = GrVkTexture::MakeNewTexture(this, budgeted, desc, imageDesc,
                                           GrMipMapsStatus::kNotAllocated);
    if (!tex) {
        return nullptr;
    }

    if (!this->uploadTexDataCompressed(tex.get(), 0, 0, desc.fWidth, desc.fHeight, compressionType,
                                       data)) {
        return nullptr;
    }

    return std::move(tex);
}

////////////////////////////////////////////////////////////////////////////////
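
// Small command-recording helpers: copyBuffer records a buffer-to-buffer copy
// region and updateBuffer records an inline buffer update on the current
// command buffer.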
void GrVkGpu::copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
                         VkDeviceSize dstOffset, VkDeviceSize size) {
    VkBufferCopy copyRegion;
    copyRegion.srcOffset = srcOffset;
    copyRegion.dstOffset = dstOffset;
    copyRegion.size = size;
    fCurrentCmdBuffer->copyBuffer(this, srcBuffer, dstBuffer, 1, &copyRegion);
}

bool GrVkGpu::updateBuffer(GrVkBuffer* buffer, const void* src,
                           VkDeviceSize offset, VkDeviceSize size) {
    // Update the buffer
    fCurrentCmdBuffer->updateBuffer(this, buffer, offset, size, src);

    return true;
}

////////////////////////////////////////////////////////////////////////////////
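
// Validation helpers for wrapping client-supplied backend objects. They check
// that the image handle (and, for adopted textures, its allocation) is valid,
// that YCbCr conversion and present layouts are supported by the caps, and
// that the format is texturable or renderable for the intended use.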
static bool check_image_info(const GrVkCaps& caps,
                             const GrVkImageInfo& info,
                             GrColorType colorType,
                             bool needsAllocation) {
    if (VK_NULL_HANDLE == info.fImage) {
        return false;
    }

    if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
        return false;
    }

    if (info.fYcbcrConversionInfo.isValid()) {
        if (!caps.supportsYcbcrConversion() || info.fFormat != VK_FORMAT_UNDEFINED) {
            return false;
        }
    }

    if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
        return false;
    }

    SkASSERT(GrVkFormatColorTypePairIsValid(info.fFormat, colorType));
    return true;
}

static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
    if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
        if (!caps.isVkFormatTexturable(info.fFormat)) {
            return false;
        }
    } else {
        SkASSERT(info.fImageTiling == VK_IMAGE_TILING_LINEAR);
        if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
            return false;
        }
    }
    return true;
}

static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
    if (!caps.maxRenderTargetSampleCount(info.fFormat)) {
        return false;
    }
    return true;
}
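
// The onWrap* entry points below share a pattern: extract the GrVkImageInfo,
// run the validation helpers above, reject protected/unprotected mismatches,
// then hand the info (plus the shared GrVkImageLayout) to the matching
// wrapped-resource factory.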
sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrColorType colorType, GrWrapOwnership ownership,
                                               GrWrapCacheable cacheable, GrIOType ioType) {
    GrVkImageInfo imageInfo;
    if (!backendTex.getVkImageInfo(&imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, colorType,
                          kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }
    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendTex.getBackendFormat(),
                                                                    colorType);
    SkASSERT(kUnknown_GrPixelConfig != config);

    GrSurfaceDesc surfDesc;
    surfDesc.fWidth = backendTex.width();
    surfDesc.fHeight = backendTex.height();
    surfDesc.fConfig = config;

    sk_sp<GrVkImageLayout> layout = backendTex.getGrVkImageLayout();
    SkASSERT(layout);
    return GrVkTexture::MakeWrappedTexture(this, surfDesc, ownership, cacheable, ioType, imageInfo,
                                           std::move(layout));
}

sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrColorType colorType,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    GrVkImageInfo imageInfo;
    if (!backendTex.getVkImageInfo(&imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, colorType,
                          kAdopt_GrWrapOwnership == ownership)) {
        return nullptr;
    }
    if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }
    if (!check_rt_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }

    if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendTex.getBackendFormat(),
                                                                    colorType);
    SkASSERT(kUnknown_GrPixelConfig != config);

    GrSurfaceDesc surfDesc;
    surfDesc.fWidth = backendTex.width();
    surfDesc.fHeight = backendTex.height();
    surfDesc.fConfig = config;
    sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, colorType,
                                                         backendTex.getBackendFormat());

    sk_sp<GrVkImageLayout> layout = backendTex.getGrVkImageLayout();
    SkASSERT(layout);

    return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(
            this, surfDesc, sampleCnt, ownership, cacheable, imageInfo, std::move(layout));
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT,
                                                         GrColorType colorType) {
    // Currently the Vulkan backend does not support wrapping of msaa render targets directly. In
    // general this is not an issue since swapchain images in Vulkan are never multisampled. Thus,
    // if you want a multisampled RT it is best to wrap the swapchain images and then let Skia
    // handle creating and owning the MSAA images.
    if (backendRT.sampleCnt() > 1) {
        return nullptr;
    }

    GrVkImageInfo info;
    if (!backendRT.getVkImageInfo(&info)) {
        return nullptr;
    }

    GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendRT.getBackendFormat(),
                                                                    colorType);
    SkASSERT(kUnknown_GrPixelConfig != config);

    if (!check_image_info(this->vkCaps(), info, colorType, false)) {
        return nullptr;
    }
    if (!check_rt_image_info(this->vkCaps(), info)) {
        return nullptr;
    }
    if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    GrSurfaceDesc desc;
    desc.fWidth = backendRT.width();
    desc.fHeight = backendRT.height();
    desc.fConfig = config;

    sk_sp<GrVkImageLayout> layout = backendRT.getGrVkImageLayout();

    sk_sp<GrVkRenderTarget> tgt =
            GrVkRenderTarget::MakeWrappedRenderTarget(this, desc, 1, info, std::move(layout));

    // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
    SkASSERT(!backendRT.stencilBits());
    if (tgt) {
        SkASSERT(tgt->canAttemptStencilAttachment());
    }

    return std::move(tgt);
}
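
// A minimal caller-side sketch of how a wrapped swapchain image reaches the entry point above.
// This is hypothetical client code (the VkImage, format, extent, and origin are assumptions, not
// part of this file); the public route is SkSurface::MakeFromBackendRenderTarget, which lands in
// onWrapBackendRenderTarget:
//
//     GrVkImageInfo info;
//     info.fImage = swapchainImage;                         // from vkGetSwapchainImagesKHR
//     info.fImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;  // layout the app left it in
//     info.fFormat = VK_FORMAT_B8G8R8A8_UNORM;
//     info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
//     info.fLevelCount = 1;
//     GrBackendRenderTarget backendRT(width, height, /*sampleCnt=*/1, /*stencilBits=*/0, info);
//     sk_sp<SkSurface> surface = SkSurface::MakeFromBackendRenderTarget(
//             context, backendRT, kTopLeft_GrSurfaceOrigin, kBGRA_8888_SkColorType,
//             /*colorSpace=*/nullptr, /*surfaceProps=*/nullptr);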

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
                                                                  int sampleCnt,
                                                                  GrColorType grColorType) {
    GrVkImageInfo imageInfo;
    if (!tex.getVkImageInfo(&imageInfo)) {
        return nullptr;
    }

    if (!check_image_info(this->vkCaps(), imageInfo, grColorType, false)) {
        return nullptr;
    }
    if (!check_rt_image_info(this->vkCaps(), imageInfo)) {
        return nullptr;
    }
    if (tex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
        return nullptr;
    }

    GrPixelConfig config = this->caps()->getConfigFromBackendFormat(tex.getBackendFormat(),
                                                                    grColorType);
    SkASSERT(kUnknown_GrPixelConfig != config);

    GrSurfaceDesc desc;
    desc.fWidth = tex.width();
    desc.fHeight = tex.height();
    desc.fConfig = config;

    sampleCnt = this->caps()->getRenderTargetSampleCount(sampleCnt, grColorType,
                                                         tex.getBackendFormat());
    if (!sampleCnt) {
        return nullptr;
    }

    sk_sp<GrVkImageLayout> layout = tex.getGrVkImageLayout();
    SkASSERT(layout);

    return GrVkRenderTarget::MakeWrappedRenderTarget(this, desc, sampleCnt, imageInfo,
                                                     std::move(layout));
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
        const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
    int maxSize = this->caps()->maxTextureSize();
    if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
        return nullptr;
    }

    GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
    if (!backendFormat.isValid()) {
        return nullptr;
    }
    GrColorType grColorType = SkColorTypeToGrColorType(imageInfo.colorType());
    int sampleCnt = this->caps()->getRenderTargetSampleCount(1, grColorType, backendFormat);
    if (!sampleCnt) {
        return nullptr;
    }

    GrPixelConfig config = this->caps()->getConfigFromBackendFormat(backendFormat, grColorType);
    if (config == kUnknown_GrPixelConfig) {
        return nullptr;
    }

    GrSurfaceDesc desc;
    desc.fWidth = imageInfo.width();
    desc.fHeight = imageInfo.height();
    desc.fConfig = config;

    return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, desc, vkInfo);
}

bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
    auto* vkTex = static_cast<GrVkTexture*>(tex);
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (vkTex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linearly tiled texture\n");
        return false;
    }

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
        !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
        !caps.mipMapSupport()) {
        return false;
    }

    int width = tex->width();
    int height = tex->height();
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));

    // SkMipMap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipMap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    SkASSERT(levelCount == vkTex->mipLevels());

    // change layout of the layers so we can write to them.
    vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT, false);

    // setup memory barrier
    SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
    VkImageMemoryBarrier imageMemoryBarrier = {
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
            nullptr,                                 // pNext
            VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
            VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,    // oldLayout
            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,    // newLayout
            VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
            vkTex->image(),                          // image
            {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange
    };

    // Blit the miplevels: each iteration transitions level N-1 to TRANSFER_SRC and then blits it
    // down into level N, which is still in TRANSFER_DST from the whole-image transition above.
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = SkTMax(1, width / 2);
        height = SkTMax(1, height / 2);

        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        fCurrentCmdBuffer->blitImage(this,
                                     vkTex->resource(),
                                     vkTex->image(),
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     vkTex->resource(),
                                     vkTex->image(),
                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                     1,
                                     &blitRegion,
                                     VK_FILTER_LINEAR);
        ++mipLevel;
    }
    if (levelCount > 1) {
        // This barrier logically is not needed, but it changes the final level to the same layout
        // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
        // layouts and future layout changes easier. The alternative here would be to track layout
        // and memory accesses per layer, which doesn't seem worth it.
        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
        vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    }
    return true;
}
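
// Worked example of the level math above (editor's illustration, not from the original source):
// for a 100x60 texture, SkMipMap::ComputeLevelCount(100, 60) == 6, so levelCount == 7 and the
// blit chain produced by the loop is
//     level 0: 100x60 -> level 1: 50x30 -> level 2: 25x15 -> level 3: 12x7
//     -> level 4: 6x3 -> level 5: 3x1 -> level 6: 1x1
// with each dimension halving by integer division and clamping at 1, exactly as
// SkTMax(1, d / 2) computes per iteration.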

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(
        const GrRenderTarget* rt, int width, int height, int numStencilSamples) {
    SkASSERT(numStencilSamples == rt->numSamples());
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numSamples();
    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferredStencilFormat();

    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

static bool copy_src_data(GrVkGpu* gpu, const GrVkAlloc& alloc, VkFormat vkFormat,
                          int width, int height,
                          const void* srcData, size_t srcRowBytes) {
    SkASSERT(srcData);
    SkASSERT(!GrVkFormatIsCompressed(vkFormat));

    void* mapPtr = GrVkMemory::MapAlloc(gpu, alloc);
    if (!mapPtr) {
        return false;
    }
    size_t bytesPerPixel = GrVkBytesPerFormat(vkFormat);
    const size_t trimRowBytes = width * bytesPerPixel;
    if (!srcRowBytes) {
        srcRowBytes = trimRowBytes;
    }
    SkASSERT(trimRowBytes * height <= alloc.fSize);

    SkRectMemcpy(mapPtr, trimRowBytes, srcData, srcRowBytes, trimRowBytes, height);

    GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, alloc.fSize);
    GrVkMemory::UnmapAlloc(gpu, alloc);
    return true;
}
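
// Row-pitch arithmetic in copy_src_data, worked through (editor's illustration with assumed
// numbers): for a 64-pixel-wide VK_FORMAT_R8G8B8A8_UNORM upload, bytesPerPixel == 4 and
// trimRowBytes == 64 * 4 == 256. If the caller's srcRowBytes is 300 (rows padded to 300 bytes),
// SkRectMemcpy walks `height` rows, reading 300-byte-strided rows from srcData but writing
// tightly packed 256-byte rows into the mapped staging memory, which is what the
// `trimRowBytes * height <= alloc.fSize` bound above relies on.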

static bool copy_compressed_src_data(GrVkGpu* gpu, const GrVkAlloc& alloc,
                                     SkImage::CompressionType compressionType, int width,
                                     int height, const void* data) {
    SkASSERT(data);

    void* mapPtr = GrVkMemory::MapAlloc(gpu, alloc);
    if (!mapPtr) {
        return false;
    }

    size_t dataSize = GrCompressedDataSize(compressionType, width, height);
    SkASSERT(dataSize <= alloc.fSize);
    memcpy(mapPtr, data, dataSize);

    GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, alloc.fSize);
    GrVkMemory::UnmapAlloc(gpu, alloc);
    return true;
}

static void set_image_layout(const GrVkInterface* vkInterface, VkCommandBuffer cmdBuffer,
                             GrVkImageInfo* info, VkImageLayout newLayout, uint32_t mipLevels,
                             VkAccessFlags dstAccessMask, VkPipelineStageFlagBits dstStageMask) {
    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(info->fImageLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(
            info->fImageLayout);

    VkImageMemoryBarrier barrier;
    memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.pNext = nullptr;
    barrier.srcAccessMask = srcAccessMask;
    barrier.dstAccessMask = dstAccessMask;
    barrier.oldLayout = info->fImageLayout;
    barrier.newLayout = newLayout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = info->fImage;
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
    GR_VK_CALL(vkInterface, CmdPipelineBarrier(
            cmdBuffer,
            srcStageMask,
            dstStageMask,
            0,
            0, nullptr,
            0, nullptr,
            1, &barrier));
    info->fImageLayout = newLayout;
}

bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat, int w, int h, bool texturable,
                                             bool renderable, GrMipMapped mipMapped,
                                             const void* srcData, size_t srcRowBytes,
                                             const SkColor4f* color, GrVkImageInfo* info,
                                             GrProtected isProtected) {
    SkASSERT(texturable || renderable);
    if (!texturable) {
        SkASSERT(GrMipMapped::kNo == mipMapped);
        SkASSERT(!srcData);
    }

    if (fProtectedContext != isProtected) {
        return false;
    }

    if (texturable && !fVkCaps->isVkFormatTexturable(vkFormat)) {
        return false;
    }

    if (renderable && !fVkCaps->isFormatRenderable(vkFormat)) {
        return false;
    }

    // Currently we don't support uploading pixel data when mipped.
    if (srcData && GrMipMapped::kYes == mipMapped) {
        return false;
    }

    VkImageUsageFlags usageFlags = 0;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (texturable) {
        usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
    }
    if (renderable) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // Figure out the number of mip levels.
    uint32_t mipLevels = 1;
    if (GrMipMapped::kYes == mipMapped) {
        mipLevels = SkMipMap::ComputeLevelCount(w, h) + 1;
    }

    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = vkFormat;
    imageDesc.fWidth = w;
    imageDesc.fHeight = h;
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    imageDesc.fIsProtected = fProtectedContext;

    if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
        SkDebugf("Failed to init image info\n");
        return false;
    }

    if (!srcData && !color) {
        return true;
    }

    // We need to declare these early so that we can delete them at the end outside of
    // the if block.
    GrVkAlloc bufferAlloc;
    VkBuffer buffer = VK_NULL_HANDLE;

    VkResult err;
    const VkCommandBufferAllocateInfo cmdInfo = {
            VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,  // sType
            nullptr,                                         // pNext
            fCmdPool->vkCommandPool(),                       // commandPool
            VK_COMMAND_BUFFER_LEVEL_PRIMARY,                 // level
            1                                                // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
    if (err) {
        GrVkImage::DestroyImageInfo(this, info);
        return false;
    }

    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
    SkASSERT(!err);

    // Set image layout and add barrier
    set_image_layout(this->vkInterface(), cmdBuffer, info, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                     mipLevels, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);

    // TODO: Lift this to GrContext level.
    SkImage::CompressionType compressionType;
    bool isCompressed = GrVkFormatToCompressionType(vkFormat, &compressionType);
    std::unique_ptr<char[]> tempData;
    if (isCompressed && !srcData) {
        SkASSERT(color);
        size_t size = GrCompressedDataSize(compressionType, w, h);
        tempData.reset(new char[size]);
        GrFillInCompressedData(compressionType, w, h, tempData.get(), *color);
        srcData = tempData.get();
    }

    if (srcData) {
        size_t bytesPerPixel = GrVkBytesPerFormat(vkFormat);
        SkASSERT(w && h);

        SkTArray<size_t> individualMipOffsets(mipLevels);

        size_t combinedBufferSize;
        if (isCompressed) {
            // Compressed textures currently must be non-MIP mapped; srcData combined with
            // GrMipMapped::kYes was already rejected above, so an early return here (which would
            // leak the image and command buffer) is unreachable.
            SkASSERT(mipMapped == GrMipMapped::kNo);
            combinedBufferSize = GrCompressedDataSize(compressionType, w, h);
            individualMipOffsets.push_back(0);
        } else {
            combinedBufferSize = GrComputeTightCombinedBufferSize(bytesPerPixel, w, h,
                                                                  &individualMipOffsets, mipLevels);
        }

        VkBufferCreateInfo bufInfo;
        memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
        bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        bufInfo.flags = fProtectedContext == GrProtected::kYes ? VK_BUFFER_CREATE_PROTECTED_BIT : 0;
        bufInfo.size = combinedBufferSize;
        bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
        bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        bufInfo.queueFamilyIndexCount = 0;
        bufInfo.pQueueFamilyIndices = nullptr;
        err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));

        if (err) {
            GrVkImage::DestroyImageInfo(this, info);
            VK_CALL(EndCommandBuffer(cmdBuffer));
            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
            return false;
        }

        if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type, true,
                                                  &bufferAlloc)) {
            GrVkImage::DestroyImageInfo(this, info);
            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
            VK_CALL(EndCommandBuffer(cmdBuffer));
            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
            return false;
        }

        bool result;
        if (isCompressed) {
            result = copy_compressed_src_data(this, bufferAlloc, compressionType, w, h, srcData);
        } else {
            SkASSERT(1 == mipLevels);
            result = copy_src_data(this, bufferAlloc, vkFormat, w, h, srcData, srcRowBytes);
        }
        if (!result) {
            GrVkImage::DestroyImageInfo(this, info);
            GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
            VK_CALL(EndCommandBuffer(cmdBuffer));
            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
            return false;
        }

        SkTArray<VkBufferImageCopy> regions(mipLevels);

        int currentWidth = w;
        int currentHeight = h;
        for (uint32_t currentMipLevel = 0; currentMipLevel < mipLevels; currentMipLevel++) {
            // Submit copy command
            VkBufferImageCopy& region = regions.push_back();
            memset(&region, 0, sizeof(VkBufferImageCopy));
            region.bufferOffset = individualMipOffsets[currentMipLevel];
            region.bufferRowLength = currentWidth;
            region.bufferImageHeight = currentHeight;
            region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, currentMipLevel, 0, 1};
            region.imageOffset = {0, 0, 0};
            region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
            currentWidth = SkTMax(1, currentWidth / 2);
            currentHeight = SkTMax(1, currentHeight / 2);
        }

        VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, info->fImage, info->fImageLayout,
                                     regions.count(), regions.begin()));
    } else {
        SkASSERT(color);
        VkClearColorValue vkColor;
        // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
        // uint32 union members in those cases.
        vkColor.float32[0] = color->fR;
        vkColor.float32[1] = color->fG;
        vkColor.float32[2] = color->fB;
        vkColor.float32[3] = color->fA;
        VkImageSubresourceRange range;
        range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        range.baseArrayLayer = 0;
        range.baseMipLevel = 0;
        range.layerCount = 1;
        range.levelCount = mipLevels;
        VK_CALL(CmdClearColorImage(cmdBuffer, info->fImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                   &vkColor, 1, &range));
    }

    if (!srcData && renderable) {
        SkASSERT(color);
        // Change image layout to color-attachment-optimal since, if we use this texture as a
        // borrowed texture within Ganesh, we are probably going to render to it.
        set_image_layout(this->vkInterface(), cmdBuffer, info,
                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, mipLevels,
                         VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                         VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
    } else if (texturable) {
        // Change image layout to shader read since, if we use this texture as a borrowed
        // texture within Ganesh, we require that its layout be set to that.
        set_image_layout(this->vkInterface(), cmdBuffer, info,
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, mipLevels,
                         VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
    }

    // End CommandBuffer
    err = VK_CALL(EndCommandBuffer(cmdBuffer));
    SkASSERT(!err);

    // Create Fence for queue
    VkFenceCreateInfo fenceInfo;
    memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = nullptr;
    fenceInfo.flags = 0;
    VkFence fence = VK_NULL_HANDLE;

    err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
    SkASSERT(!err);

    VkProtectedSubmitInfo protectedSubmitInfo;
    if (fProtectedContext == GrProtected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = fProtectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = 0;
    submitInfo.pWaitSemaphores = nullptr;
    submitInfo.pWaitDstStageMask = nullptr;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &cmdBuffer;
    submitInfo.signalSemaphoreCount = 0;
    submitInfo.pSignalSemaphores = nullptr;
    err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
    SkASSERT(!err);

    err = VK_CALL(WaitForFences(this->device(), 1, &fence, VK_TRUE, UINT64_MAX));
    if (VK_TIMEOUT == err) {
        GrVkImage::DestroyImageInfo(this, info);
        if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
            GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
        }
        VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
        VK_CALL(DestroyFence(this->device(), fence, nullptr));
        SkDebugf("Fence failed to signal: %d\n", err);
        SK_ABORT("failing");
    }
    SkASSERT(!err);

    // Clean up transfer resources
    if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
        GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
        VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
    }
    VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
    VK_CALL(DestroyFence(this->device(), fence, nullptr));

    return true;
}

#if GR_TEST_UTILS
static bool vk_format_to_pixel_config(VkFormat format, GrPixelConfig* config) {
    GrPixelConfig dontCare;
    if (!config) {
        config = &dontCare;
    }

    switch (format) {
        case VK_FORMAT_UNDEFINED:
            *config = kUnknown_GrPixelConfig;
            return false;
        case VK_FORMAT_R8G8B8A8_UNORM:
            *config = kRGBA_8888_GrPixelConfig;
            return true;
        case VK_FORMAT_R8G8B8_UNORM:
            *config = kRGB_888_GrPixelConfig;
            return true;
        case VK_FORMAT_R8G8_UNORM:
            *config = kRG_88_GrPixelConfig;
            return true;
        case VK_FORMAT_B8G8R8A8_UNORM:
            *config = kBGRA_8888_GrPixelConfig;
            return true;
        case VK_FORMAT_R8G8B8A8_SRGB:
            *config = kSRGBA_8888_GrPixelConfig;
            return true;
        case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
            *config = kRGBA_1010102_GrPixelConfig;
            return true;
        case VK_FORMAT_R5G6B5_UNORM_PACK16:
            *config = kRGB_565_GrPixelConfig;
            return true;
        case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
            *config = kRGBA_4444_GrPixelConfig;  // we're swizzling in this case
            return true;
        case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
            *config = kRGBA_4444_GrPixelConfig;
            return true;
        case VK_FORMAT_R8_UNORM:
            *config = kAlpha_8_GrPixelConfig;
            return true;
        case VK_FORMAT_R32G32B32A32_SFLOAT:
            *config = kRGBA_float_GrPixelConfig;
            return true;
        case VK_FORMAT_R16G16B16A16_SFLOAT:
            *config = kRGBA_half_GrPixelConfig;
            return true;
        case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
            *config = kRGB_ETC1_GrPixelConfig;
            return true;
        case VK_FORMAT_R16_SFLOAT:
            *config = kAlpha_half_as_Red_GrPixelConfig;
            return true;
        case VK_FORMAT_R16_UNORM:
            *config = kR_16_GrPixelConfig;
            return true;
        case VK_FORMAT_R16G16_UNORM:
            *config = kRG_1616_GrPixelConfig;
            return true;
        // Experimental (for Y416 and mutant P016/P010)
        case VK_FORMAT_R16G16B16A16_UNORM:
            *config = kRGBA_16161616_GrPixelConfig;
            return true;
        case VK_FORMAT_R16G16_SFLOAT:
            *config = kRG_half_GrPixelConfig;
            return true;
        default:
            return false;
    }
    SK_ABORT("Unexpected config");
    return false;
}
#endif

GrBackendTexture GrVkGpu::createBackendTexture(int w, int h,
                                               const GrBackendFormat& format,
                                               GrMipMapped mipMapped,
                                               GrRenderable renderable,
                                               const void* srcData, size_t rowBytes,
                                               const SkColor4f* color, GrProtected isProtected) {
    const GrVkCaps& caps = this->vkCaps();
    this->handleDirtyContext();

    if (fProtectedContext != isProtected) {
        return GrBackendTexture();
    }

    if (w > caps.maxTextureSize() || h > caps.maxTextureSize()) {
        return GrBackendTexture();
    }

    const VkFormat* vkFormat = format.getVkFormat();
    if (!vkFormat) {
        SkDebugf("Could not get VkFormat from GrBackendFormat\n");
        return GrBackendTexture();
    }

    if (!caps.isVkFormatTexturable(*vkFormat)) {
        SkDebugf("Format is not texturable\n");
        return GrBackendTexture();
    }

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(*vkFormat, w, h, true,
                                              GrRenderable::kYes == renderable, mipMapped, srcData,
                                              rowBytes, color, &info, isProtected)) {
        SkDebugf("Failed to create backing VkImage\n");
        return GrBackendTexture();
    }

    GrBackendTexture beTex = GrBackendTexture(w, h, info);
#if GR_TEST_UTILS
    // Lots of tests don't go through Skia's public interface, which would set the config, so for
    // testing we make sure we set a config here.
    GrPixelConfig config = kUnknown_GrPixelConfig;
    if (!vk_format_to_pixel_config(*vkFormat, &config)) {
        SkDebugf("Could not map VkFormat to a GrPixelConfig\n");
    }
    beTex.setPixelConfig(config);
#endif
    return beTex;
}
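
// Hypothetical caller-side sketch (editor's illustration; GrContext's exact createBackendTexture
// overloads vary across Skia revisions, so treat the signature below as an assumption). The
// public entry point validates arguments and then lands in GrVkGpu::createBackendTexture above:
//
//     GrBackendTexture beTex = context->createBackendTexture(
//             256, 256, GrBackendFormat::MakeVk(VK_FORMAT_R8G8B8A8_UNORM),
//             SkColors::kRed, GrMipMapped::kYes, GrRenderable::kYes);
//     // ... wrap beTex in an SkImage or SkSurface, use it, then:
//     context->deleteBackendTexture(beTex);
//
// With 256x256 and GrMipMapped::kYes, the mipLevels computation in
// createVkImageForBackendSurface yields SkMipMap::ComputeLevelCount(256, 256) + 1 == 9 levels
// (256, 128, 64, 32, 16, 8, 4, 2, 1).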

void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
    SkASSERT(GrBackendApi::kVulkan == tex.fBackend);

    GrVkImageInfo info;
    if (tex.getVkImageInfo(&info)) {
        GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
    }
}

#if GR_TEST_UTILS
bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
    SkASSERT(GrBackendApi::kVulkan == tex.fBackend);

    GrVkImageInfo backend;
    if (!tex.getVkImageInfo(&backend)) {
        return false;
    }

    if (backend.fImage && backend.fAlloc.fMemory) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend.fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(int w, int h, GrColorType ct) {
    this->handleDirtyContext();

    if (w > this->caps()->maxRenderTargetSize() || h > this->caps()->maxRenderTargetSize()) {
        return GrBackendRenderTarget();
    }

    auto config = GrColorTypeToPixelConfig(ct);
    if (kUnknown_GrPixelConfig == config) {
        return {};
    }
    VkFormat vkFormat;
    if (!GrPixelConfigToVkFormat(config, &vkFormat)) {
        return {};
    }

    GrVkImageInfo info;
    if (!this->createVkImageForBackendSurface(vkFormat, w, h, false, true, GrMipMapped::kNo,
                                              nullptr, 0, &SkColors::kTransparent, &info,
                                              GrProtected::kNo)) {
        return {};
    }

    GrBackendRenderTarget beRT = GrBackendRenderTarget(w, h, 1, 0, info);
    // Lots of tests don't go through Skia's public interface, which would set the config, so for
    // testing we make sure we set a config here.
    beRT.setPixelConfig(config);
    return beRT;
}

void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
    SkASSERT(GrBackendApi::kVulkan == rt.fBackend);

    GrVkImageInfo info;
    if (rt.getVkImageInfo(&info)) {
        // something in the command buffer may still be using this, so force submit
        this->submitCommandBuffer(kForce_SyncQueue);
        GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
    }
}

void GrVkGpu::testingOnly_flushGpuAndSync() {
    this->submitCommandBuffer(kForce_SyncQueue);
}
#endif

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addBufferMemoryBarrier(const GrVkResource* resource,
                                     VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    SkASSERT(resource);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       resource,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(const GrVkResource* resource,
                                    VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    SkASSERT(resource);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       resource,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::onFinishFlush(GrSurfaceProxy* proxies[], int n,
                            SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info,
                            const GrPrepareForExternalIORequests& externalRequests) {
    SkASSERT(n >= 0);
    SkASSERT(!n || proxies);
    // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
    // not affect what we do here.
    if (n && access == SkSurface::BackendSurfaceAccess::kPresent) {
        GrVkImage* image;
        for (int i = 0; i < n; ++i) {
            SkASSERT(proxies[i]->isInstantiated());
            if (GrTexture* tex = proxies[i]->peekTexture()) {
                image = static_cast<GrVkTexture*>(tex);
            } else {
                GrRenderTarget* rt = proxies[i]->peekRenderTarget();
                SkASSERT(rt);
                image = static_cast<GrVkRenderTarget*>(rt);
            }
            image->prepareForPresent(this);
        }
    }

    // Handle requests for preparing for external IO
    for (int i = 0; i < externalRequests.fNumImages; ++i) {
        SkImage* image = externalRequests.fImages[i];
        if (!image->isTextureBacked()) {
            continue;
        }
        SkImage_GpuBase* gpuImage = static_cast<SkImage_GpuBase*>(as_IB(image));
        sk_sp<GrTextureProxy> proxy = gpuImage->asTextureProxyRef(this->getContext());
        SkASSERT(proxy);

        if (!proxy->isInstantiated()) {
            auto resourceProvider = this->getContext()->priv().resourceProvider();
            if (!proxy->instantiate(resourceProvider)) {
                continue;
            }
        }

        GrTexture* tex = proxy->peekTexture();
        if (!tex) {
            continue;
        }
        GrVkTexture* vkTex = static_cast<GrVkTexture*>(tex);
        vkTex->prepareForExternal(this);
    }
    for (int i = 0; i < externalRequests.fNumSurfaces; ++i) {
        SkSurface* surface = externalRequests.fSurfaces[i];
        if (!surface->getCanvas()->getGrContext()) {
            continue;
        }
        SkSurface_Gpu* gpuSurface = static_cast<SkSurface_Gpu*>(surface);
        auto* rtc = gpuSurface->getDevice()->accessRenderTargetContext();
        sk_sp<GrRenderTargetProxy> proxy = rtc->asRenderTargetProxyRef();
        if (!proxy->isInstantiated()) {
            auto resourceProvider = this->getContext()->priv().resourceProvider();
            if (!proxy->instantiate(resourceProvider)) {
                continue;
            }
        }

        GrRenderTarget* rt = proxy->peekRenderTarget();
        SkASSERT(rt);
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
        if (externalRequests.fPrepareSurfaceForPresent &&
            externalRequests.fPrepareSurfaceForPresent[i]) {
            vkRT->prepareForPresent(this);
        } else {
            vkRT->prepareForExternal(this);
        }
    }

    if (info.fFlags & kSyncCpu_GrFlushFlag) {
        this->submitCommandBuffer(kForce_SyncQueue, info.fFinishedProc, info.fFinishedContext);
    } else {
        this->submitCommandBuffer(kSkip_SyncQueue, info.fFinishedProc, info.fFinishedContext);
    }
}

static int get_surface_sample_cnt(GrSurface* surf) {
    if (const GrRenderTarget* rt = surf->asRenderTarget()) {
        return rt->numSamples();
    }
    return 0;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
                                     GrVkImage* srcImage, const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    int dstSampleCnt = get_surface_sample_cnt(dst);
    int srcSampleCnt = get_surface_sample_cnt(src);
    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
    SkASSERT(this->vkCaps().canCopyImage(dst->config(), dstSampleCnt, dstHasYcbcr,
                                         src->config(), srcSampleCnt, srcHasYcbcr));
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }

    // These flags are for flushing/invalidating caches. For the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
    copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);

    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    // The rect is already in device space so we pass in kTopLeft so no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

void GrVkGpu::copySurfaceAsBlit(GrSurface* dst, GrSurface* src, GrVkImage* dstImage,
                                GrVkImage* srcImage, const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
#ifdef SK_DEBUG
    int dstSampleCnt = get_surface_sample_cnt(dst);
    int srcSampleCnt = get_surface_sample_cnt(src);
    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
    SkASSERT(this->vkCaps().canCopyAsBlit(dst->config(), dstSampleCnt, dstImage->isLinearTiled(),
                                          dstHasYcbcr, src->config(), srcSampleCnt,
                                          srcImage->isLinearTiled(), srcHasYcbcr));
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // The dst rect is in the same (device) space as the src rect, so no flip is needed here.
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, srcRect.width(),
                                        srcRect.height());

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };

    fCurrentCmdBuffer->blitImage(this,
                                 *srcImage,
                                 *dstImage,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_NEAREST);  // We never scale so any filter works here

    // The rect is already in device space so we pass in kTopLeft so no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                                   const SkIPoint& dstPoint) {
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return;
    }
    GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    this->resolveImage(dst, srcRT, srcRect, dstPoint);
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    // The rect is already in device space so we pass in kTopLeft so no flip is done.
    this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
}

bool GrVkGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint, bool canDiscardOutsideDstRect) {
#ifdef SK_DEBUG
    if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
        SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
    }
    if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
        SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
    }
#endif
    if (src->isProtected() && !dst->isProtected()) {
        SkDebugf("Can't copy from protected memory to non-protected\n");
        return false;
    }

    GrPixelConfig dstConfig = dst->config();
    GrPixelConfig srcConfig = src->config();

    int dstSampleCnt = get_surface_sample_cnt(dst);
    int srcSampleCnt = get_surface_sample_cnt(src);

    GrVkImage* dstImage;
    GrVkImage* srcImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        if (vkRT->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        dstImage = vkRT->numSamples() > 1 ? vkRT->msaaImage() : vkRT;
    } else {
        SkASSERT(dst->asTexture());
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    }
    GrRenderTarget* srcRT = src->asRenderTarget();
    if (srcRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
        srcImage = vkRT->numSamples() > 1 ? vkRT->msaaImage() : vkRT;
    } else {
        SkASSERT(src->asTexture());
        srcImage = static_cast<GrVkTexture*>(src->asTexture());
    }

    bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
    bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();

    // Pick the first copy strategy the caps allow: an MSAA resolve, then vkCmdCopyImage, and
    // finally vkCmdBlitImage.
    if (this->vkCaps().canCopyAsResolve(dstConfig, dstSampleCnt, dstHasYcbcr,
                                        srcConfig, srcSampleCnt, srcHasYcbcr)) {
        this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().canCopyImage(dstConfig, dstSampleCnt, dstHasYcbcr,
                                    srcConfig, srcSampleCnt, srcHasYcbcr)) {
        this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().canCopyAsBlit(dstConfig, dstSampleCnt, dstImage->isLinearTiled(),
                                     dstHasYcbcr, srcConfig, srcSampleCnt,
                                     srcImage->isLinearTiled(), srcHasYcbcr)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    return false;
}

bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height,
                           GrColorType dstColorType, void* buffer, size_t rowBytes) {
    if (surface->isProtected()) {
        return false;
    }

    if (GrPixelConfigToColorType(surface->config()) != dstColorType) {
        return false;
    }

    GrVkImage* image = nullptr;
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
    if (rt) {
        // Reading from render targets that wrap a secondary command buffer is not allowed since
        // it would require us to know the VkImage, which we don't have, as well as need us to
        // stop and start the VkRenderPass which we don't have access to.
        if (rt->wrapsSecondaryCommandBuffer()) {
            return false;
        }
        // resolve the render target if necessary
        switch (rt->getResolveType()) {
            case GrVkRenderTarget::kCantResolve_ResolveType:
                return false;
            case GrVkRenderTarget::kAutoResolves_ResolveType:
                break;
            case GrVkRenderTarget::kCanResolve_ResolveType:
                this->resolveRenderTargetNoFlush(rt);
                break;
            default:
                SK_ABORT("Unknown resolve type");
        }
        image = rt;
    } else {
        image = static_cast<GrVkTexture*>(surface->asTexture());
    }

    if (!image) {
        return false;
    }

    // Skia's RGB_888x color type, which we map to the Vulkan R8G8B8_UNORM, expects the data to be
    // 32 bits, but the Vulkan format is only 24. So we first copy the surface into an R8G8B8A8
    // image and then do the read pixels from that.
    sk_sp<GrVkTextureRenderTarget> copySurface;
    if (dstColorType == GrColorType::kRGB_888x && image->imageFormat() == VK_FORMAT_R8G8B8_UNORM) {
        SkASSERT(surface->config() == kRGB_888_GrPixelConfig);

        int srcSampleCount = 0;
        if (rt) {
            srcSampleCount = rt->numSamples();
        }
        bool srcHasYcbcr = image->ycbcrConversionInfo().isValid();
        if (!this->vkCaps().canCopyAsBlit(kRGBA_8888_GrPixelConfig, 1, false, false,
                                          surface->config(), srcSampleCount, image->isLinearTiled(),
                                          srcHasYcbcr)) {
            return false;
        }

        // Make a new surface that is RGBA to copy the RGB surface into.
        GrSurfaceDesc surfDesc;
        surfDesc.fWidth = width;
        surfDesc.fHeight = height;
        surfDesc.fConfig = kRGBA_8888_GrPixelConfig;

        VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                       VK_IMAGE_USAGE_SAMPLED_BIT |
                                       VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                                       VK_IMAGE_USAGE_TRANSFER_DST_BIT;

        GrVkImage::ImageDesc imageDesc;
        imageDesc.fImageType = VK_IMAGE_TYPE_2D;
        imageDesc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
        imageDesc.fWidth = width;
        imageDesc.fHeight = height;
        imageDesc.fLevels = 1;
        imageDesc.fSamples = 1;
        imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
        imageDesc.fUsageFlags = usageFlags;
        imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

        copySurface = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
                this, SkBudgeted::kYes, surfDesc, 1, imageDesc, GrMipMapsStatus::kNotAllocated);
        if (!copySurface) {
            return false;
        }

        SkIRect srcRect = SkIRect::MakeXYWH(left, top, width, height);
        SkAssertResult(this->copySurface(copySurface.get(), surface, srcRect, SkIPoint::Make(0,0)));

        top = 0;
        left = 0;
        dstColorType = GrColorType::kRGBA_8888;
        image = copySurface.get();
    }

    // Change layout of our target so it can be used as a copy source.
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                          VK_ACCESS_TRANSFER_READ_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    int bpp = GrColorTypeBytesPerPixel(dstColorType);
    size_t tightRowBytes = bpp * width;

    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));

    bool copyFromOrigin = this->vkCaps().mustDoCopiesFromOrigin();
    if (copyFromOrigin) {
        region.imageOffset = { 0, 0, 0 };
        region.imageExtent = { (uint32_t)(left + width), (uint32_t)(top + height), 1 };
    } else {
        VkOffset3D offset = { left, top, 0 };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
    }

    size_t transBufferRowBytes = bpp * region.imageExtent.width;
    size_t imageRows = region.imageExtent.height;
    auto transferBuffer = sk_sp<GrVkTransferBuffer>(
            static_cast<GrVkTransferBuffer*>(this->createBuffer(transBufferRowBytes * imageRows,
                                                                GrGpuBufferType::kXferGpuToCpu,
                                                                kStream_GrAccessPattern)
                                                     .release()));

    // Copy the image to a buffer so we can map it to cpu memory
    region.bufferOffset = transferBuffer->offset();
    region.bufferRowLength = 0;  // Forces RowLength to be width. We handle the rowBytes below.
    region.bufferImageHeight = 0;  // Forces height to be tightly packed. Only useful for 3d images.
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         image,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer.get(),
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);
    void* mappedMemory = transferBuffer->map();
    const GrVkAlloc& transAlloc = transferBuffer->alloc();
    GrVkMemory::InvalidateMappedAlloc(this, transAlloc, 0, transAlloc.fSize);

    if (copyFromOrigin) {
        uint32_t skipRows = region.imageExtent.height - height;
        mappedMemory = (char*)mappedMemory + transBufferRowBytes * skipRows + bpp * left;
    }

    SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, height);

    transferBuffer->unmap();
    return true;
}
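
// Worked example of the copyFromOrigin offset math above (editor's illustration with assumed
// numbers): reading a 10x8 region at (left, top) == (5, 3) with bpp == 4 on a driver that
// requires copies from the origin gives imageExtent == {15, 11}, so transBufferRowBytes == 60
// and tightRowBytes == 40. After the copy, skipRows == 11 - 8 == 3 and the mapped pointer is
// advanced by 60 * 3 + 4 * 5 == 200 bytes to land on the first requested pixel, before
// SkRectMemcpy trims each 60-byte buffer row down to a tight 40-byte destination row.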

// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
static void adjust_bounds_to_granularity(SkIRect* dstBounds, const SkIRect& srcBounds,
                                         const VkExtent2D& granularity, int maxWidth,
                                         int maxHeight) {
    // Adjust Width
    if ((0 != granularity.width && 1 != granularity.width)) {
        // Start with the right side of rect so we know if we end up going past the maxWidth.
        int rightAdj = srcBounds.fRight % granularity.width;
        if (rightAdj != 0) {
            rightAdj = granularity.width - rightAdj;
        }
        dstBounds->fRight = srcBounds.fRight + rightAdj;
        if (dstBounds->fRight > maxWidth) {
            dstBounds->fRight = maxWidth;
            dstBounds->fLeft = 0;
        } else {
            dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
        }
    } else {
        dstBounds->fLeft = srcBounds.fLeft;
        dstBounds->fRight = srcBounds.fRight;
    }

    // Adjust height
    if ((0 != granularity.height && 1 != granularity.height)) {
        // Start with the bottom side of rect so we know if we end up going past the maxHeight.
        int bottomAdj = srcBounds.fBottom % granularity.height;
        if (bottomAdj != 0) {
            bottomAdj = granularity.height - bottomAdj;
        }
        dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
        if (dstBounds->fBottom > maxHeight) {
            dstBounds->fBottom = maxHeight;
            dstBounds->fTop = 0;
        } else {
            dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
        }
    } else {
        dstBounds->fTop = srcBounds.fTop;
        dstBounds->fBottom = srcBounds.fBottom;
    }
}
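
// Worked example (editor's illustration with assumed numbers): with a render-area granularity of
// 32x32 on a 512x512 attachment, srcBounds of [fLeft=10, fRight=70) adjusts as follows:
// rightAdj == 70 % 32 == 6, so rightAdj becomes 32 - 6 == 26 and fRight == 96; since 96 <= 512,
// fLeft == 10 - (10 % 32) == 0. The resulting x-range [0, 96) is granularity-aligned and fully
// contains the requested [10, 70) range. The height path does the same with top/bottom.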

void GrVkGpu::submitSecondaryCommandBuffer(const SkTArray<GrVkSecondaryCommandBuffer*>& buffers,
                                           const GrVkRenderPass* renderPass,
                                           const VkClearValue* colorClear,
                                           GrVkRenderTarget* target, GrSurfaceOrigin origin,
                                           const SkIRect& bounds) {
    SkASSERT(!target->wrapsSecondaryCommandBuffer());
    const SkIRect* pBounds = &bounds;
    SkIRect flippedBounds;
    if (kBottomLeft_GrSurfaceOrigin == origin) {
        flippedBounds = bounds;
        flippedBounds.fTop = target->height() - bounds.fBottom;
        flippedBounds.fBottom = target->height() - bounds.fTop;
        pBounds = &flippedBounds;
    }

    // The bounds we use for the render pass should be of the granularity supported
    // by the device.
    const VkExtent2D& granularity = renderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds, *pBounds, granularity,
                                     target->width(), target->height());
        pBounds = &adjustedBounds;
    }

#ifdef SK_DEBUG
    uint32_t index;
    bool result = renderPass->colorAttachmentIndex(&index);
    SkASSERT(result && 0 == index);
    result = renderPass->stencilAttachmentIndex(&index);
    if (result) {
        SkASSERT(1 == index);
    }
#endif
    VkClearValue clears[2];
    clears[0].color = colorClear->color;
    clears[1].depthStencil.depth = 0.0f;
    clears[1].depthStencil.stencil = 0;

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, clears, *target, *pBounds, true);
    for (int i = 0; i < buffers.count(); ++i) {
        fCurrentCmdBuffer->executeCommands(this, buffers[i]);
    }
    fCurrentCmdBuffer->endRenderPass(this);

    this->didWriteToSurface(target, origin, &bounds);
}
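
// The bottom-left flip above, worked through (editor's illustration with assumed numbers): for a
// 100-pixel-tall target and bounds of [fTop=10, fBottom=30) in bottom-left-origin space, the
// device-space (top-left-origin) bounds become fTop == 100 - 30 == 70 and
// fBottom == 100 - 10 == 90, i.e. the same 20-pixel-tall band measured from the other edge.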

void GrVkGpu::submit(GrGpuCommandBuffer* buffer) {
    if (buffer->asRTCommandBuffer()) {
        SkASSERT(fCachedRTCommandBuffer.get() == buffer);

        fCachedRTCommandBuffer->submit();
        fCachedRTCommandBuffer->reset();
    } else {
        SkASSERT(fCachedTexCommandBuffer.get() == buffer);

        fCachedTexCommandBuffer->submit();
        fCachedTexCommandBuffer->reset();
    }
}

GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
    VkFenceCreateInfo createInfo;
    memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
    createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    createInfo.pNext = nullptr;
    createInfo.flags = 0;
    VkFence fence = VK_NULL_HANDLE;

    VK_CALL_ERRCHECK(CreateFence(this->device(), &createInfo, nullptr, &fence));
    VK_CALL(QueueSubmit(this->queue(), 0, nullptr, fence));

    GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(VkFence));
    return (GrFence)fence;
}

bool GrVkGpu::waitFence(GrFence fence, uint64_t timeout) {
    SkASSERT(VK_NULL_HANDLE != (VkFence)fence);

    VkResult result = VK_CALL(WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, timeout));
    return (VK_SUCCESS == result);
}

void GrVkGpu::deleteFence(GrFence fence) const {
    VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
}
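
// A minimal lifecycle sketch for the fence API above (editor's illustration; the timeout value is
// an arbitrary assumption). insertFence submits an otherwise-empty queue operation that signals
// the fence once all previously submitted work completes:
//
//     GrFence fence = gpu->insertFence();
//     if (gpu->waitFence(fence, /*timeout=*/1000000000)) {  // 1 second, in nanoseconds
//         // All GPU work submitted before insertFence() has finished.
//     }
//     gpu->deleteFence(fence);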

sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
    return GrVkSemaphore::Make(this, isOwned);
}

sk_sp<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                 GrResourceProvider::SemaphoreWrapType wrapType,
                                                 GrWrapOwnership ownership) {
    return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
}

void GrVkGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore) {
    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldSignal()) {
        resource->ref();
        fSemaphoresToSignal.push_back(resource);
    }
}

void GrVkGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());

    GrVkSemaphore::Resource* resource = vkSem->getResource();
    if (resource->shouldWait()) {
        resource->ref();
        fSemaphoresToWaitOn.push_back(resource);
    }
}

sk_sp<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(texture);
    GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
    vkTexture->setImageLayout(this,
                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT,
                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                              false);
    this->submitCommandBuffer(kSkip_SyncQueue);

    // The image layout change serves as a barrier, so no semaphore is needed.
    // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
    // thread safe so that only the first thread that tries to use the semaphore actually submits
    // it. This additionally would also require thread safety in command buffer submissions to
    // queues in general.
    return nullptr;
}

void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    fDrawables.emplace_back(std::move(drawable));
}

uint32_t GrVkGpu::getExtraSamplerKeyForProgram(const GrSamplerState& samplerState,
                                               const GrBackendFormat& format) {
    const GrVkYcbcrConversionInfo* ycbcrInfo = format.getVkYcbcrConversionInfo();
    SkASSERT(ycbcrInfo);
    if (!ycbcrInfo->isValid()) {
        return 0;
    }

    const GrVkSampler* sampler = this->resourceProvider().findOrCreateCompatibleSampler(
            samplerState, *ycbcrInfo);

    return sampler->uniqueID();
}

void GrVkGpu::storeVkPipelineCacheData() {
    if (this->getContext()->priv().getPersistentCache()) {
        this->resourceProvider().storePipelineCacheData();
    }
}