GrVkCommandBuffer.cpp
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkCommandBuffer.h"

#include "include/core/SkRect.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkIndexBuffer.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineLayout.h"
#include "src/gpu/vk/GrVkPipelineState.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkTransferBuffer.h"
#include "src/gpu/vk/GrVkUtil.h"
#include "src/gpu/vk/GrVkVertexBuffer.h"

void GrVkCommandBuffer::invalidateState() {
    for (auto& boundInputBuffer : fBoundInputBuffers) {
        boundInputBuffer = VK_NULL_HANDLE;
    }
    fBoundIndexBuffer = VK_NULL_HANDLE;

    memset(&fCachedViewport, 0, sizeof(VkViewport));
    fCachedViewport.width = -1.0f;  // Viewport must have a width greater than 0

    memset(&fCachedScissor, 0, sizeof(VkRect2D));
    fCachedScissor.offset.x = -1;  // Scissor offset must be non-negative to be valid

    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0;
    }
}
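
// Note: the sentinel values written above (negative viewport width, negative scissor offset,
// blend constants of -1) can never match state recorded by setViewport(), setScissor(), or
// setBlendConstants(), so the memcmp-based redundancy checks in those methods always fall
// through and re-record dynamic state after an invalidate.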

void GrVkCommandBuffer::freeGPUData(GrVkGpu* gpu) const {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive);
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedResources[i]->unref(gpu);
    }
    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
    }
    for (int i = 0; i < fTrackedRecordingResources.count(); ++i) {
        fTrackedRecordingResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecordingResources[i]->unref(gpu);
    }

    if (!this->isWrapped()) {
        GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), fCmdPool->vkCommandPool(),
                                                          1, &fCmdBuffer));
    }

    this->onFreeGPUData(gpu);
}

void GrVkCommandBuffer::abandonGPUData() const {
    SkDEBUGCODE(fResourcesReleased = true;)
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedResources[i]->unrefAndAbandon();
    }
    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
        // We don't recycle resources when abandoning them.
        fTrackedRecycledResources[i]->unrefAndAbandon();
    }
    for (int i = 0; i < fTrackedRecordingResources.count(); ++i) {
        fTrackedRecordingResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecordingResources[i]->unrefAndAbandon();
    }

    this->onAbandonGPUData();
}

void GrVkCommandBuffer::releaseResources(GrVkGpu* gpu) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkDEBUGCODE(fResourcesReleased = true;)
    SkASSERT(!fIsActive);
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedResources[i]->unref(gpu);
    }
    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
    }
    for (int i = 0; i < fTrackedRecordingResources.count(); ++i) {
        fTrackedRecordingResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecordingResources[i]->unref(gpu);
    }

    if (++fNumResets > kNumRewindResetsBeforeFullReset) {
        fTrackedResources.reset();
        fTrackedRecycledResources.reset();
        fTrackedRecordingResources.reset();
        fTrackedResources.setReserve(kInitialTrackedResourcesCount);
        fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
        fTrackedRecordingResources.setReserve(kInitialTrackedResourcesCount);
        fNumResets = 0;
    } else {
        fTrackedResources.rewind();
        fTrackedRecycledResources.rewind();
        fTrackedRecordingResources.rewind();
    }

    this->invalidateState();

    this->onReleaseResources(gpu);
}

////////////////////////////////////////////////////////////////////////////////
// CommandBuffer commands
////////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        const GrVkResource* resource,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) {
    SkASSERT(!this->isWrapped());
    SkASSERT(fIsActive);
    // Image barriers inside a render pass are legal, but they require the render pass's
    // subpasses to declare self-dependencies, which we don't support yet. Buffer barriers are
    // never allowed inside a render pass. For now we just assert that we are not in a render
    // pass.
    SkASSERT(!fActiveRenderPass);

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = reinterpret_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check if we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier already in the current batch. If it does, we must
        // submit the current batch first, because the Vulkan spec does not define a specific
        // ordering for barriers submitted in the same batch.
        // TODO: See if we can gain anything by merging barriers together instead of submitting
        // the old ones.
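        // Example: a batched barrier covering mip levels [2,4] and a new one covering [4,6]
        // overlap at level 4 (max(2,4) <= min(4,6)), so the batch is flushed before the new
        // barrier is recorded; disjoint ranges such as [0,1] and [2,3] may stay batched.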
        for (int i = 0; i < fImageBarriers.count(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
                if (SkTMax(newStart, oldStart) <= SkTMin(newEnd, oldEnd)) {
                    this->submitPipelineBarriers(gpu);
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;

    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    fHasWork = true;
    if (resource) {
        this->addResource(resource);
    }
}

void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);

    // Currently we never submit a pipeline barrier without at least one memory barrier.
    if (fBufferBarriers.count() || fImageBarriers.count()) {
        // As in pipelineBarrier() above, barriers inside a render pass would require subpass
        // self-dependencies (for images) and are disallowed outright for buffers, so assert
        // that we are not in a render pass.
        SkASSERT(!fActiveRenderPass);
        SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
                fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
                fBufferBarriers.count(), fBufferBarriers.begin(),
                fImageBarriers.count(), fImageBarriers.begin()));
        fBufferBarriers.reset();
        fImageBarriers.reset();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
    SkASSERT(!fBufferBarriers.count());
    SkASSERT(!fImageBarriers.count());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}

void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
                                        const GrVkVertexBuffer* vbuffer) {
    VkBuffer vkBuffer = vbuffer->buffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    SkASSERT(binding < kMaxInputBuffers);
    // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundInputBuffers[binding]) {
        VkDeviceSize offset = vbuffer->offset();
        GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
                                                            binding,
                                                            1,
                                                            &vkBuffer,
                                                            &offset));
        fBoundInputBuffers[binding] = vkBuffer;
        this->addResource(vbuffer->resource());
    }
}

void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, const GrVkIndexBuffer* ibuffer) {
    VkBuffer vkBuffer = ibuffer->buffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundIndexBuffer) {
        GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
                                                          vkBuffer,
                                                          ibuffer->offset(),
                                                          VK_INDEX_TYPE_UINT16));
        fBoundIndexBuffer = vkBuffer;
        this->addResource(ibuffer->resource());
    }
}
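
// Note: the index type is hard-coded to VK_INDEX_TYPE_UINT16 above because Ganesh index data
// is uint16_t; if 32-bit indices were ever introduced, the index type would need to be plumbed
// through here.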

void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(numAttachments > 0);
    SkASSERT(numRects > 0);

    this->addingWork(gpu);

#ifdef SK_DEBUG
    for (int i = 0; i < numAttachments; ++i) {
        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
            uint32_t testIndex;
            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
            SkASSERT(testIndex == attachments[i].colorAttachment);
        }
    }
#endif
    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
                                                       numAttachments,
                                                       attachments,
                                                       numRects,
                                                       clearRects));
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           GrVkPipelineState* pipelineState,
                                           GrVkPipelineLayout* layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout->layout(),
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
    this->addRecordingResource(layout);
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           const SkTArray<const GrVkRecycledResource*>& recycled,
                                           const SkTArray<const GrVkResource*>& resources,
                                           GrVkPipelineLayout* layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout->layout(),
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
    this->addRecordingResource(layout);
    for (int i = 0; i < recycled.count(); ++i) {
        this->addRecycledResource(recycled[i]);
    }
    for (int i = 0; i < resources.count(); ++i) {
        this->addResource(resources[i]);
    }
}

void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                   pipeline->pipeline()));
    this->addResource(pipeline);
}

void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
                                                  indexCount,
                                                  instanceCount,
                                                  firstIndex,
                                                  vertexOffset,
                                                  firstInstance));
}

void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
                                           vertexCount,
                                           instanceCount,
                                           firstVertex,
                                           firstInstance));
}

void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
                                    uint32_t firstViewport,
                                    uint32_t viewportCount,
                                    const VkViewport* viewports) {
    SkASSERT(fIsActive);
    SkASSERT(1 == viewportCount);
    if (memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
                                                      firstViewport,
                                                      viewportCount,
                                                      viewports));
        fCachedViewport = viewports[0];
    }
}

void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
                                   uint32_t firstScissor,
                                   uint32_t scissorCount,
                                   const VkRect2D* scissors) {
    SkASSERT(fIsActive);
    SkASSERT(1 == scissorCount);
    if (memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
                                                     firstScissor,
                                                     scissorCount,
                                                     scissors));
        fCachedScissor = scissors[0];
    }
}

void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
                                          const float blendConstants[4]) {
    SkASSERT(fIsActive);
    if (memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}

void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
    this->submitPipelineBarriers(gpu);
    fHasWork = true;
}
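
// Note: addingWork() is the choke point that every recorded command runs through. Barriers
// accumulated by pipelineBarrier() are only flushed here, just before the next real command,
// so back-to-back barriers batch into a single vkCmdPipelineBarrier call.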

///////////////////////////////////////////////////////////////////////////////
// PrimaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////
GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}

GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(const GrVkGpu* gpu,
                                                           GrVkCommandPool* cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool->vkCommandPool(),                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
                                                                         &cmdInfo,
                                                                         &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkPrimaryCommandBuffer(cmdBuffer, cmdPool);
}

void GrVkPrimaryCommandBuffer::begin(const GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
                                                               &cmdBufferBeginInfo));
    fIsActive = true;
}

void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->submitPipelineBarriers(gpu);

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
    for (int i = 0; i < fTrackedRecordingResources.count(); ++i) {
        fTrackedRecordingResources[i]->unref(gpu);
    }
    fTrackedRecordingResources.rewind();
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}

void GrVkPrimaryCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               const VkClearValue clearValues[],
                                               const GrVkRenderTarget& target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(renderPass->isCompatible(target));

    this->addingWork(gpu);

    VkRenderPassBeginInfo beginInfo;
    VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft, bounds.fTop };
    renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };

    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = renderPass->vkRenderPass();
    beginInfo.framebuffer = target.framebuffer()->framebuffer();
    beginInfo.renderArea = renderArea;
    beginInfo.clearValueCount = renderPass->clearValueCount();
    beginInfo.pClearValues = clearValues;

    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;

    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    this->addResource(renderPass);
    target.addResources(*this);
}

void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}

void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               GrVkSecondaryCommandBuffer* buffer) {
    // The Vulkan spec allows secondary command buffers to be executed on a primary command
    // buffer as long as the pools they were each allocated from were created with the same
    // queue family. However, we currently always create them from the same pool.
    SkASSERT(buffer->commandPool() == fCmdPool);
    SkASSERT(fIsActive);
    SkASSERT(!buffer->fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));

    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
    buffer->ref();
    fSecondaryCommandBuffers.push_back(buffer);
    // When executing a secondary command buffer all state (besides render pass state) becomes
    // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
    this->invalidateState();
}

static void submit_to_queue(const GrVkInterface* interface,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            GrProtected protectedContext) {
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (protectedContext == GrProtected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    GR_VK_CALL_ERRCHECK(interface, QueueSubmit(queue, 1, &submitInfo, fence));
}
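
// Note: for protected contexts, VkProtectedSubmitInfo is chained onto VkSubmitInfo via pNext.
// Both structs are stack locals, which is safe because vkQueueSubmit consumes the submit info
// before returning.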

void GrVkPrimaryCommandBuffer::submitToQueue(
        const GrVkGpu* gpu,
        VkQueue queue,
        GrVkGpu::SyncQueue sync,
        SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
        SkTArray<GrVkSemaphore::Resource*>& waitSemaphores) {
    SkASSERT(!fIsActive);

    VkResult err;
    if (VK_NULL_HANDLE == fSubmitFence) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        err = GR_VK_CALL(gpu->vkInterface(), CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                         &fSubmitFence));
        SkASSERT(!err);
    } else {
        GR_VK_CALL(gpu->vkInterface(), ResetFences(gpu->device(), 1, &fSubmitFence));
    }

    int signalCount = signalSemaphores.count();
    int waitCount = waitSemaphores.count();

    if (0 == signalCount && 0 == waitCount) {
        // This command buffer has no dependent semaphores so we can simply submit it to the
        // queue with no worries.
        submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, 0, nullptr, nullptr, 1,
                        &fCmdBuffer, 0, nullptr,
                        gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
    } else {
        SkTArray<VkSemaphore> vkSignalSems(signalCount);
        for (int i = 0; i < signalCount; ++i) {
            if (signalSemaphores[i]->shouldSignal()) {
                this->addResource(signalSemaphores[i]);
                vkSignalSems.push_back(signalSemaphores[i]->semaphore());
            }
        }

        SkTArray<VkSemaphore> vkWaitSems(waitCount);
        SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
        for (int i = 0; i < waitCount; ++i) {
            if (waitSemaphores[i]->shouldWait()) {
                this->addResource(waitSemaphores[i]);
                vkWaitSems.push_back(waitSemaphores[i]->semaphore());
                vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
            }
        }
        submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, vkWaitSems.count(),
                        vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
                        vkSignalSems.count(), vkSignalSems.begin(),
                        gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
        for (int i = 0; i < signalCount; ++i) {
            signalSemaphores[i]->markAsSignaled();
        }
        for (int i = 0; i < waitCount; ++i) {
            waitSemaphores[i]->markAsWaited();
        }
    }

    if (GrVkGpu::kForce_SyncQueue == sync) {
        err = GR_VK_CALL(gpu->vkInterface(),
                         WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
        if (VK_TIMEOUT == err) {
            SkDebugf("Fence failed to signal: %d\n", err);
            SK_ABORT("failing");
        }
        SkASSERT(!err);

        fFinishedProcs.reset();

        // Destroy the fence
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
    }
}
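
// Note on the fence lifecycle: fSubmitFence is created lazily on the first submit and reset
// for reuse on later submits; finished() below polls it. In the kForce_SyncQueue path above
// the fence is waited on and destroyed immediately, so the next submit recreates it.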

bool GrVkPrimaryCommandBuffer::finished(const GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err = GR_VK_CALL(gpu->vkInterface(), GetFenceStatus(gpu->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SkDebugf("Error getting fence status: %d\n", err);
            SK_ABORT("failing");
            break;
    }

    return false;
}

void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc) {
    fFinishedProcs.push_back(std::move(finishedProc));
}

void GrVkPrimaryCommandBuffer::onReleaseResources(GrVkGpu* gpu) {
    for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
        fSecondaryCommandBuffers[i]->releaseResources(gpu);
    }
    fFinishedProcs.reset();
}

void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers() {
    for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
        SkASSERT(fSecondaryCommandBuffers[i]->commandPool() == fCmdPool);
        fCmdPool->recycleSecondaryCommandBuffer(fSecondaryCommandBuffers[i]);
    }
    fSecondaryCommandBuffers.reset();
}

void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
                                         GrVkImage* srcImage,
                                         VkImageLayout srcLayout,
                                         GrVkImage* dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t copyRegionCount,
                                         const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcImage->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
                                                srcImage->image(),
                                                srcLayout,
                                                dstImage->image(),
                                                dstLayout,
                                                copyRegionCount,
                                                copyRegions));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkResource* srcResource,
                                         VkImage srcImage,
                                         VkImageLayout srcLayout,
                                         const GrVkResource* dstResource,
                                         VkImage dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcResource);
    this->addResource(dstResource);
    GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
                                                srcImage,
                                                srcLayout,
                                                dstImage,
                                                dstLayout,
                                                blitRegionCount,
                                                blitRegions,
                                                filter));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkImage& srcImage,
                                         const GrVkImage& dstImage,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    this->blitImage(gpu,
                    srcImage.resource(),
                    srcImage.image(),
                    srcImage.currentLayout(),
                    dstImage.resource(),
                    dstImage.image(),
                    dstImage.currentLayout(),
                    blitRegionCount,
                    blitRegions,
                    filter);
}

void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                 GrVkImage* srcImage,
                                                 VkImageLayout srcLayout,
                                                 GrVkTransferBuffer* dstBuffer,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcImage->resource());
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                        srcImage->image(),
                                                        srcLayout,
                                                        dstBuffer->buffer(),
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                 GrVkTransferBuffer* srcBuffer,
                                                 GrVkImage* dstImage,
                                                 VkImageLayout dstLayout,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcBuffer->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
                                                        srcBuffer->buffer(),
                                                        dstImage->image(),
                                                        dstLayout,
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
                                          GrVkBuffer* srcBuffer,
                                          GrVkBuffer* dstBuffer,
                                          uint32_t regionCount,
                                          const VkBufferCopy* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
#ifdef SK_DEBUG
    for (uint32_t i = 0; i < regionCount; ++i) {
        const VkBufferCopy& region = regions[i];
        SkASSERT(region.size > 0);
        SkASSERT(region.srcOffset < srcBuffer->size());
        SkASSERT(region.dstOffset < dstBuffer->size());
        SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
        SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
    }
#endif
    this->addResource(srcBuffer->resource());
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
                                                 srcBuffer->buffer(),
                                                 dstBuffer->buffer(),
                                                 regionCount,
                                                 regions));
}

void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
                                            GrVkBuffer* dstBuffer,
                                            VkDeviceSize dstOffset,
                                            VkDeviceSize dataSize,
                                            const void* data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(0 == (dstOffset & 0x03));  // four byte aligned
    // TODO: handle larger transfer sizes
    SkASSERT(dataSize <= 65536);
    SkASSERT(0 == (dataSize & 0x03));  // four byte aligned
    this->addingWork(gpu);
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdUpdateBuffer(fCmdBuffer,
                                                   dstBuffer->buffer(),
                                                   dstOffset,
                                                   dataSize,
                                                   (const uint32_t*) data));
}

void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                               GrVkImage* image,
                                               const VkClearColorValue* color,
                                               uint32_t subRangeCount,
                                               const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
                                                      image->image(),
                                                      image->currentLayout(),
                                                      color,
                                                      subRangeCount,
                                                      subRanges));
}

void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                      GrVkImage* image,
                                                      const VkClearDepthStencilValue* color,
                                                      uint32_t subRangeCount,
                                                      const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
                                                             image->image(),
                                                             image->currentLayout(),
                                                             color,
                                                             subRangeCount,
                                                             subRanges));
}

void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
                                            const GrVkImage& srcImage,
                                            const GrVkImage& dstImage,
                                            uint32_t regionCount,
                                            const VkImageResolve* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->addingWork(gpu);
    this->addResource(srcImage.resource());
    this->addResource(dstImage.resource());

    GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
                                                   srcImage.image(),
                                                   srcImage.currentLayout(),
                                                   dstImage.image(),
                                                   dstImage.currentLayout(),
                                                   regionCount,
                                                   regions));
}

void GrVkPrimaryCommandBuffer::onFreeGPUData(GrVkGpu* gpu) const {
    SkASSERT(!fActiveRenderPass);
    // Destroy the fence, if any
    if (VK_NULL_HANDLE != fSubmitFence) {
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
    }
    for (GrVkSecondaryCommandBuffer* buffer : fSecondaryCommandBuffers) {
        buffer->unref(gpu);
    }
}

void GrVkPrimaryCommandBuffer::onAbandonGPUData() const {
    SkASSERT(!fActiveRenderPass);
    for (GrVkSecondaryCommandBuffer* buffer : fSecondaryCommandBuffers) {
        buffer->unrefAndAbandon();
    }
}

///////////////////////////////////////////////////////////////////////////////
// SecondaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(const GrVkGpu* gpu,
                                                               GrVkCommandPool* cmdPool) {
    SkASSERT(cmdPool);
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool->vkCommandPool(),                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
                                                                         &cmdInfo,
                                                                         &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkSecondaryCommandBuffer(cmdBuffer, cmdPool);
}

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(VkCommandBuffer cmdBuffer) {
    return new GrVkSecondaryCommandBuffer(cmdBuffer, nullptr);
}

void GrVkSecondaryCommandBuffer::begin(const GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
                                       const GrVkRenderPass* compatibleRenderPass) {
    SkASSERT(!fIsActive);
    SkASSERT(compatibleRenderPass);
    fActiveRenderPass = compatibleRenderPass;

    if (!this->isWrapped()) {
        VkCommandBufferInheritanceInfo inheritanceInfo;
        memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
        inheritanceInfo.pNext = nullptr;
        inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
        inheritanceInfo.subpass = 0;  // Currently only using 1 subpass for each render pass
        inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
        inheritanceInfo.occlusionQueryEnable = false;
        inheritanceInfo.queryFlags = 0;
        inheritanceInfo.pipelineStatistics = 0;

        VkCommandBufferBeginInfo cmdBufferBeginInfo;
        memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        cmdBufferBeginInfo.pNext = nullptr;
        cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
                                   VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;

        GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
                                                                   &cmdBufferBeginInfo));
    }
    fIsActive = true;
}

void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    if (!this->isWrapped()) {
        GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
    }
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}