GrMtlGpuCommandBuffer.mm

/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/mtl/GrMtlGpuCommandBuffer.h"

#include "src/gpu/GrColor.h"
#include "src/gpu/GrFixedClip.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/mtl/GrMtlPipelineState.h"
#include "src/gpu/mtl/GrMtlPipelineStateBuilder.h"
#include "src/gpu/mtl/GrMtlRenderTarget.h"
#include "src/gpu/mtl/GrMtlTexture.h"

#if !__has_feature(objc_arc)
#error This file must be compiled with ARC. Use the -fobjc-arc flag.
#endif

GrMtlGpuRTCommandBuffer::GrMtlGpuRTCommandBuffer(
        GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin, const SkRect& bounds,
        const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
        const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo)
        : INHERITED(rt, origin)
        , fGpu(gpu)
#ifdef SK_DEBUG
        , fRTBounds(bounds)
#endif
        {
    this->setupRenderPass(colorInfo, stencilInfo);
}

GrMtlGpuRTCommandBuffer::~GrMtlGpuRTCommandBuffer() {
    SkASSERT(nil == fActiveRenderCmdEncoder);
}

void GrMtlGpuRTCommandBuffer::precreateCmdEncoder() {
    // For clears, we may not have an associated draw. So we prepare a cmdEncoder that
    // will be submitted whether there's a draw or not.
    SkASSERT(nil == fActiveRenderCmdEncoder);
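    // Metal applies an attachment's loadAction when a render command encoder begins its
    // pass, so creating an encoder against fRenderPassDesc -- even one that records no
    // draws -- is enough to make a MTLLoadActionClear take effect.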
    SkDEBUGCODE(id<MTLRenderCommandEncoder> cmdEncoder =)
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
    SkASSERT(nil != cmdEncoder);
}

void GrMtlGpuRTCommandBuffer::submit() {
    if (!fRenderTarget) {
        return;
    }
    SkIRect iBounds;
    fBounds.roundOut(&iBounds);
    fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &iBounds);
}

void GrMtlGpuRTCommandBuffer::copy(GrSurface* src, const SkIRect& srcRect,
                                   const SkIPoint& dstPoint) {
    // We cannot have an active encoder when we call copy since it requires its own
    // command encoder.
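    // (copySurface presumably records the copy with a separate encoder -- e.g. a blit
    // encoder -- on the same GrMtlCommandBuffer, hence the requirement above.)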
    SkASSERT(nil == fActiveRenderCmdEncoder);
    fGpu->copySurface(fRenderTarget, src, srcRect, dstPoint);
}

void GrMtlGpuRTCommandBuffer::transferFrom(const SkIRect& srcRect, GrColorType bufferColorType,
                                           GrGpuBuffer* transferBuffer, size_t offset) {
    // We cannot have an active encoder when we call transferFrom since it requires its own
    // command encoder.
    SkASSERT(nil == fActiveRenderCmdEncoder);
    fGpu->transferPixelsFrom(fRenderTarget, srcRect.fLeft, srcRect.fTop, srcRect.width(),
                             srcRect.height(), bufferColorType, transferBuffer, offset);
}

GrMtlPipelineState* GrMtlGpuRTCommandBuffer::prepareDrawState(
        const GrPrimitiveProcessor& primProc,
        const GrPipeline& pipeline,
        const GrPipeline::FixedDynamicState* fixedDynamicState,
        GrPrimitiveType primType) {
    // TODO: resolve textures and regenerate mipmaps as needed
    const GrTextureProxy* const* primProcProxies = nullptr;
    if (fixedDynamicState) {
        primProcProxies = fixedDynamicState->fPrimitiveProcessorTextures;
    }
    SkASSERT(SkToBool(primProcProxies) == SkToBool(primProc.numTextureSamplers()));

    GrMtlPipelineState* pipelineState =
            fGpu->resourceProvider().findOrCreateCompatiblePipelineState(fRenderTarget, fOrigin,
                                                                         pipeline,
                                                                         primProc,
                                                                         primProcProxies,
                                                                         primType);
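    // The name findOrCreateCompatiblePipelineState suggests the resource provider caches
    // pipeline states keyed on the render target/pipeline/processor/primitive-type
    // combination, so only the per-draw data below needs to be set on every call.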
    if (!pipelineState) {
        return nullptr;
    }
    pipelineState->setData(fRenderTarget, fOrigin, primProc, pipeline, primProcProxies);
    fCurrentVertexStride = primProc.vertexStride();

    return pipelineState;
}

void GrMtlGpuRTCommandBuffer::onDraw(const GrPrimitiveProcessor& primProc,
                                     const GrPipeline& pipeline,
                                     const GrPipeline::FixedDynamicState* fixedDynamicState,
                                     const GrPipeline::DynamicStateArrays* dynamicStateArrays,
                                     const GrMesh meshes[],
                                     int meshCount,
                                     const SkRect& bounds) {
    if (!meshCount) {
        return;
    }

    auto prepareSampledImage = [&](GrTexture* texture, GrSamplerState::Filter filter) {
        GrMtlTexture* mtlTexture = static_cast<GrMtlTexture*>(texture);
        // We may need to resolve the texture first if it is also a render target
        GrMtlRenderTarget* texRT = static_cast<GrMtlRenderTarget*>(mtlTexture->asRenderTarget());
        if (texRT) {
            fGpu->resolveRenderTargetNoFlush(texRT);
        }

        // Check if we need to regenerate any mip maps
        if (GrSamplerState::Filter::kMipMap == filter &&
            (texture->width() != 1 || texture->height() != 1)) {
            SkASSERT(texture->texturePriv().mipMapped() == GrMipMapped::kYes);
            if (texture->texturePriv().mipMapsAreDirty()) {
                fGpu->regenerateMipMapLevels(texture);
            }
        }
    };

    if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
        for (int m = 0, i = 0; m < meshCount; ++m) {
            for (int s = 0; s < primProc.numTextureSamplers(); ++s, ++i) {
                auto texture = dynamicStateArrays->fPrimitiveProcessorTextures[i]->peekTexture();
                prepareSampledImage(texture, primProc.textureSampler(s).samplerState().filter());
            }
        }
    } else {
        for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
            auto texture = fixedDynamicState->fPrimitiveProcessorTextures[i]->peekTexture();
            prepareSampledImage(texture, primProc.textureSampler(i).samplerState().filter());
        }
    }

    GrFragmentProcessor::Iter iter(pipeline);
    while (const GrFragmentProcessor* fp = iter.next()) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const GrFragmentProcessor::TextureSampler& sampler = fp->textureSampler(i);
            prepareSampledImage(sampler.peekTexture(), sampler.samplerState().filter());
        }
    }

    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    GrMtlPipelineState* pipelineState = this->prepareDrawState(primProc, pipeline,
                                                               fixedDynamicState, primitiveType);
    if (!pipelineState) {
        return;
    }

    SkASSERT(nil == fActiveRenderCmdEncoder);
    fActiveRenderCmdEncoder = fGpu->commandBuffer()->getRenderCommandEncoder(
            fRenderPassDesc, pipelineState, this);
    SkASSERT(fActiveRenderCmdEncoder);

    [fActiveRenderCmdEncoder setRenderPipelineState:pipelineState->mtlPipelineState()];
    pipelineState->setDrawState(fActiveRenderCmdEncoder, pipeline.outputSwizzle(),
                                pipeline.getXferProcessor());

    bool dynamicScissor =
            pipeline.isScissorEnabled() && dynamicStateArrays && dynamicStateArrays->fScissorRects;
    if (!pipeline.isScissorEnabled()) {
        GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                       fRenderTarget, fOrigin,
                                                       SkIRect::MakeWH(fRenderTarget->width(),
                                                                       fRenderTarget->height()));
    } else if (!dynamicScissor) {
        SkASSERT(fixedDynamicState);
        GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                       fRenderTarget, fOrigin,
                                                       fixedDynamicState->fScissorRect);
    }

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        SkASSERT(nil != fActiveRenderCmdEncoder);
        if (mesh.primitiveType() != primitiveType) {
            SkDEBUGCODE(pipelineState = nullptr);
            primitiveType = mesh.primitiveType();
            pipelineState = this->prepareDrawState(primProc, pipeline, fixedDynamicState,
                                                   primitiveType);
            if (!pipelineState) {
                return;
            }

            [fActiveRenderCmdEncoder setRenderPipelineState:pipelineState->mtlPipelineState()];
            pipelineState->setDrawState(fActiveRenderCmdEncoder, pipeline.outputSwizzle(),
                                        pipeline.getXferProcessor());
        }

        if (dynamicScissor) {
            GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder, fRenderTarget,
                                                           fOrigin,
                                                           dynamicStateArrays->fScissorRects[i]);
        }

        mesh.sendToGpu(this);
    }

    fActiveRenderCmdEncoder = nil;
    fBounds.join(bounds);
}

void GrMtlGpuRTCommandBuffer::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
    // If we end up here from absClear, the clear bounds may be bigger than the RT proxy bounds,
    // but in that case the scissor should be enabled, so this check should still succeed.
    SkASSERT(!clip.scissorEnabled() || clip.scissorRect().contains(fRTBounds));
    fRenderPassDesc.colorAttachments[0].clearColor = MTLClearColorMake(color.fR, color.fG, color.fB,
                                                                       color.fA);
    fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionClear;
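    // Creating an encoder now executes the clear; resetting the loadAction to Load below
    // keeps any later encoder created for this pass from clearing again.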
    this->precreateCmdEncoder();
    fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
}

void GrMtlGpuRTCommandBuffer::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
    SkASSERT(!clip.hasWindowRectangles());

    GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // This should only be called internally when we know we have a stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.
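    // For example, with an 8-bit stencil buffer an inside-mask clear writes
    // 1 << (8 - 1) = 0x80, i.e. only the high bit -- the one Skia uses for clip -- set.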
    if (insideStencilMask) {
        fRenderPassDesc.stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
    } else {
        fRenderPassDesc.stencilAttachment.clearStencil = 0;
    }

    fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionClear;
    this->precreateCmdEncoder();
    fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
}

void GrMtlGpuRTCommandBuffer::initRenderState(id<MTLRenderCommandEncoder> encoder) {
    [encoder pushDebugGroup:@"initRenderState"];
    [encoder setFrontFacingWinding:MTLWindingCounterClockwise];
    // Strictly speaking we shouldn't have to set this, as the default viewport is the size of
    // the drawable used to generate the renderCommandEncoder -- but just in case.
    MTLViewport viewport = { 0.0, 0.0,
                             (double) fRenderTarget->width(), (double) fRenderTarget->height(),
                             0.0, 1.0 };
    [encoder setViewport:viewport];
    this->resetBufferBindings();
    [encoder popDebugGroup];
}

void GrMtlGpuRTCommandBuffer::setupRenderPass(
        const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
        const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    GR_STATIC_ASSERT((int)GrLoadOp::kLoad == 0);
    GR_STATIC_ASSERT((int)GrLoadOp::kClear == 1);
    GR_STATIC_ASSERT((int)GrLoadOp::kDiscard == 2);
    SkASSERT(colorInfo.fLoadOp <= GrLoadOp::kDiscard);
    SkASSERT(stencilInfo.fLoadOp <= GrLoadOp::kDiscard);

    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    GR_STATIC_ASSERT((int)GrStoreOp::kStore == 0);
    GR_STATIC_ASSERT((int)GrStoreOp::kDiscard == 1);
    SkASSERT(colorInfo.fStoreOp <= GrStoreOp::kDiscard);
    SkASSERT(stencilInfo.fStoreOp <= GrStoreOp::kDiscard);

    auto renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor];
    renderPassDesc.colorAttachments[0].texture =
            static_cast<GrMtlRenderTarget*>(fRenderTarget)->mtlColorTexture();
    renderPassDesc.colorAttachments[0].slice = 0;
    renderPassDesc.colorAttachments[0].level = 0;
    const SkPMColor4f& clearColor = colorInfo.fClearColor;
    renderPassDesc.colorAttachments[0].clearColor =
            MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
    renderPassDesc.colorAttachments[0].loadAction =
            mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
    renderPassDesc.colorAttachments[0].storeAction =
            mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];

    const GrMtlStencilAttachment* stencil = static_cast<GrMtlStencilAttachment*>(
            fRenderTarget->renderTargetPriv().getStencilAttachment());
    if (stencil) {
        renderPassDesc.stencilAttachment.texture = stencil->stencilView();
    }
    renderPassDesc.stencilAttachment.clearStencil = 0;
    renderPassDesc.stencilAttachment.loadAction =
            mtlLoadAction[static_cast<int>(stencilInfo.fLoadOp)];
    renderPassDesc.stencilAttachment.storeAction =
            mtlStoreAction[static_cast<int>(stencilInfo.fStoreOp)];

    fRenderPassDesc = renderPassDesc;

    // Manage initial clears
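    // A load-op clear touches the whole target, so fBounds starts out as the full render
    // target rect; otherwise it starts empty and grows as draws are recorded.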
    if (colorInfo.fLoadOp == GrLoadOp::kClear || stencilInfo.fLoadOp == GrLoadOp::kClear) {
        fBounds = SkRect::MakeWH(fRenderTarget->width(),
                                 fRenderTarget->height());
        this->precreateCmdEncoder();
        if (colorInfo.fLoadOp == GrLoadOp::kClear) {
            fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
        }
        if (stencilInfo.fLoadOp == GrLoadOp::kClear) {
            fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
        }
    } else {
        fBounds.setEmpty();
    }
}

static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
    const static MTLPrimitiveType mtlPrimitiveType[] {
        MTLPrimitiveTypeTriangle,
        MTLPrimitiveTypeTriangleStrip,
        MTLPrimitiveTypePoint,
        MTLPrimitiveTypeLine,
        MTLPrimitiveTypeLineStrip
    };
    GR_STATIC_ASSERT((int)GrPrimitiveType::kTriangles == 0);
    GR_STATIC_ASSERT((int)GrPrimitiveType::kTriangleStrip == 1);
    GR_STATIC_ASSERT((int)GrPrimitiveType::kPoints == 2);
    GR_STATIC_ASSERT((int)GrPrimitiveType::kLines == 3);
    GR_STATIC_ASSERT((int)GrPrimitiveType::kLineStrip == 4);

    SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
    return mtlPrimitiveType[static_cast<int>(primitiveType)];
}

void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer,
                                           size_t vertexOffset,
                                           const GrBuffer* instanceBuffer) {
    size_t bufferIndex = GrMtlUniformHandler::kLastUniformBinding + 1;
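    // Vertex and instance buffers are bound at the indices just past the uniform-buffer
    // bindings so the two sets of bindings cannot collide.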
    if (vertexBuffer) {
        SkASSERT(!vertexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
        const GrMtlBuffer* grMtlBuffer = static_cast<const GrMtlBuffer*>(vertexBuffer);
        this->setVertexBuffer(fActiveRenderCmdEncoder, grMtlBuffer, vertexOffset, bufferIndex++);
    }
    if (instanceBuffer) {
        SkASSERT(!instanceBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
        const GrMtlBuffer* grMtlBuffer = static_cast<const GrMtlBuffer*>(instanceBuffer);
        this->setVertexBuffer(fActiveRenderCmdEncoder, grMtlBuffer, 0, bufferIndex++);
    }
}

void GrMtlGpuRTCommandBuffer::sendMeshToGpu(GrPrimitiveType primitiveType,
                                            const GrBuffer* vertexBuffer,
                                            int vertexCount,
                                            int baseVertex) {
    this->bindGeometry(vertexBuffer, 0, nullptr);

    SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
    [fActiveRenderCmdEncoder drawPrimitives:gr_to_mtl_primitive(primitiveType)
                                vertexStart:baseVertex
                                vertexCount:vertexCount];
}

void GrMtlGpuRTCommandBuffer::sendIndexedMeshToGpu(GrPrimitiveType primitiveType,
                                                   const GrBuffer* indexBuffer,
                                                   int indexCount,
                                                   int baseIndex,
                                                   uint16_t /*minIndexValue*/,
                                                   uint16_t /*maxIndexValue*/,
                                                   const GrBuffer* vertexBuffer,
                                                   int baseVertex,
                                                   GrPrimitiveRestart restart) {
    this->bindGeometry(vertexBuffer, fCurrentVertexStride*baseVertex, nullptr);

    SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.

    id<MTLBuffer> mtlIndexBuffer = nil;
    if (indexBuffer) {
        SkASSERT(!indexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());

        mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
        SkASSERT(mtlIndexBuffer);
    }

    SkASSERT(restart == GrPrimitiveRestart::kNo);
    size_t indexOffset = static_cast<const GrMtlBuffer*>(indexBuffer)->offset() +
                         sizeof(uint16_t) * baseIndex;
    [fActiveRenderCmdEncoder drawIndexedPrimitives:gr_to_mtl_primitive(primitiveType)
                                        indexCount:indexCount
                                         indexType:MTLIndexTypeUInt16
                                       indexBuffer:mtlIndexBuffer
                                 indexBufferOffset:indexOffset];
    fGpu->stats()->incNumDraws();
}

void GrMtlGpuRTCommandBuffer::sendInstancedMeshToGpu(GrPrimitiveType primitiveType,
                                                     const GrBuffer* vertexBuffer,
                                                     int vertexCount,
                                                     int baseVertex,
                                                     const GrBuffer* instanceBuffer,
                                                     int instanceCount,
                                                     int baseInstance) {
    this->bindGeometry(vertexBuffer, 0, instanceBuffer);

    SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
    [fActiveRenderCmdEncoder drawPrimitives:gr_to_mtl_primitive(primitiveType)
                                vertexStart:baseVertex
                                vertexCount:vertexCount
                              instanceCount:instanceCount
                               baseInstance:baseInstance];
}

void GrMtlGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
                                                            const GrBuffer* indexBuffer,
                                                            int indexCount,
                                                            int baseIndex,
                                                            const GrBuffer* vertexBuffer,
                                                            int baseVertex,
                                                            const GrBuffer* instanceBuffer,
                                                            int instanceCount,
                                                            int baseInstance,
                                                            GrPrimitiveRestart restart) {
    this->bindGeometry(vertexBuffer, 0, instanceBuffer);

    SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.

    id<MTLBuffer> mtlIndexBuffer = nil;
    if (indexBuffer) {
        SkASSERT(!indexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());

        mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
        SkASSERT(mtlIndexBuffer);
    }

    SkASSERT(restart == GrPrimitiveRestart::kNo);
    size_t indexOffset = static_cast<const GrMtlBuffer*>(indexBuffer)->offset() +
                         sizeof(uint16_t) * baseIndex;
    [fActiveRenderCmdEncoder drawIndexedPrimitives:gr_to_mtl_primitive(primitiveType)
                                        indexCount:indexCount
                                         indexType:MTLIndexTypeUInt16
                                       indexBuffer:mtlIndexBuffer
                                 indexBufferOffset:indexOffset
                                     instanceCount:instanceCount
                                        baseVertex:baseVertex
                                      baseInstance:baseInstance];
    fGpu->stats()->incNumDraws();
}

void GrMtlGpuRTCommandBuffer::setVertexBuffer(id<MTLRenderCommandEncoder> encoder,
                                              const GrMtlBuffer* buffer,
                                              size_t vertexOffset,
                                              size_t index) {
    SkASSERT(index < 4);
    id<MTLBuffer> mtlVertexBuffer = buffer->mtlBuffer();
    SkASSERT(mtlVertexBuffer);
    // Apple recommends using setVertexBufferOffset: when changing the offset
    // for a currently bound vertex buffer, rather than setVertexBuffer:
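    // fBufferBindings tracks the current binding at each index, so a fully redundant
    // rebind (same buffer and same offset) is skipped entirely.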
    size_t offset = buffer->offset() + vertexOffset;
    if (fBufferBindings[index].fBuffer != mtlVertexBuffer) {
        [encoder setVertexBuffer: mtlVertexBuffer
                          offset: offset
                         atIndex: index];
        fBufferBindings[index].fBuffer = mtlVertexBuffer;
        fBufferBindings[index].fOffset = offset;
    } else if (fBufferBindings[index].fOffset != offset) {
        [encoder setVertexBufferOffset: offset
                               atIndex: index];
        fBufferBindings[index].fOffset = offset;
    }
}

void GrMtlGpuRTCommandBuffer::resetBufferBindings() {
    for (size_t i = 0; i < kNumBindings; ++i) {
        fBufferBindings[i].fBuffer = nil;
    }
}