GrOpFlushState.h

/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include <utility>

#include "src/core/SkArenaAlloc.h"
#include "src/core/SkArenaAllocList.h"
#include "src/gpu/GrAppliedClip.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrDeinstantiateProxyTracker.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/ops/GrMeshDrawOp.h"

class GrGpu;
class GrGpuCommandBuffer;
class GrGpuRTCommandBuffer;
class GrResourceProvider;

/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpList flush. */
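//
// Rough lifecycle as used by the flush machinery (a sketch, not a contract; see
// GrOpFlushState.cpp and the op list code for the authoritative sequence):
//   1. Ops prepare: each op records its draws and uploads through the GrMeshDrawOp::Target and
//      GrDeferredUploadTarget interfaces below.
//   2. preExecuteDraws() is called once every op has had a chance to prepare.
//   3. Ops execute: the caller sets setOpArgs()/setCommandBuffer() per op and the recorded draws
//      and uploads are replayed, e.g. via executeDrawsAndUploadsForMeshDrawOp().
//   4. reset() returns the flush state to its initial condition so it can be reused.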
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
public:
    // The CpuBufferCache, if non-null, supplies reusable CPU-side buffers of size
    // GrBufferAllocPool::kDefaultBufferSize to the vertex and index pools. In that case new CPU
    // memory is only allocated for vertices/indices when a buffer larger than kDefaultBufferSize
    // is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);

    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
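    // (Roughly: this is where the vertex/index pools are unmapped and any ASAP uploads are
    // performed, so data recorded during prepare is visible to the GPU before execution; see
    // GrOpFlushState.cpp for the exact behavior.)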
    void preExecuteDraws();

    void doUpload(GrDeferredTextureUploadFn&);

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
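    // (Roughly: this replays the draws the op recorded via recordDraw() during prepare, pairing
    // them with a pipeline built from the GrProcessorSet, clip, and stencil settings passed in;
    // see the implementation for details.)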
    void executeDrawsAndUploadsForMeshDrawOp(
            const GrOp* op, const SkRect& chainBounds, GrProcessorSet&&,
            GrPipeline::InputFlags = GrPipeline::InputFlags::kNone,
            const GrUserStencilSettings* = &GrUserStencilSettings::kUnused);

    GrGpuCommandBuffer* commandBuffer() { return fCommandBuffer; }
    // Helper function used by Ops that are only called via RenderTargetOpLists
    GrGpuRTCommandBuffer* rtCommandBuffer();
    void setCommandBuffer(GrGpuCommandBuffer* buffer) { fCommandBuffer = buffer; }

    GrGpu* gpu() { return fGpu; }

    void reset();

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        GrSurfaceOrigin origin() const { return fProxy->origin(); }
        GrRenderTarget* renderTarget() const { return fProxy->peekRenderTarget(); }

        GrOp* fOp;
        // TODO: do we still need the dst proxy here?
        GrRenderTargetProxy* fProxy;
        GrAppliedClip* fAppliedClip;
        GrSwizzle fOutputSwizzle;
        GrXferProcessor::DstProxy fDstProxy;
    };
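
    // (Typical usage, not enforced here: the executing op list sets these args via setOpArgs()
    // before each op's execute call and clears them afterward.)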
    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }

    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkASSERT(fOpArgs->fOp);
        return *fOpArgs;
    }

    /** Overrides of GrDeferredUploadTarget. */
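    // (As a reminder of the deferred-upload contract: an inline upload runs between draws, just
    // before the draw holding the returned token executes, while an ASAP upload runs before any
    // draws are executed. This is a summary; GrDeferredUpload.h is authoritative.)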
    const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
    GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;

    /** Overrides of GrMeshDrawOp::Target. */
    void recordDraw(
            sk_sp<const GrGeometryProcessor>, const GrMesh[], int meshCnt,
            const GrPipeline::FixedDynamicState*, const GrPipeline::DynamicStateArrays*) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    GrRenderTargetProxy* proxy() const final { return fOpArgs->fProxy; }
    const GrAppliedClip* appliedClip() final { return fOpArgs->fAppliedClip; }
    GrAppliedClip detachAppliedClip() final;
    const GrXferProcessor::DstProxy& dstProxy() const final { return fOpArgs->fDstProxy; }
    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    GrStrikeCache* glyphCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager is required (and
    // permissible).
    GrAtlasManager* atlasManager() const final;

    GrDeinstantiateProxyTracker* deinstantiateProxyTracker() { return &fDeinstantiateProxyTracker; }

private:
    /** GrMeshDrawOp::Target override. */
    SkArenaAlloc* allocator() override { return &fArena; }

    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}

        GrDeferredTextureUploadFn fUpload;
        GrDeferredUploadToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the Gpu object to set up
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();

        sk_sp<const GrGeometryProcessor> fGeometryProcessor;
        const GrPipeline::FixedDynamicState* fFixedDynamicState;
        const GrPipeline::DynamicStateArrays* fDynamicStateArrays;
        const GrMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
    };

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAlloc fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
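    // (Informally, the draw at index i in fDraws is assumed to correspond to token
    // fBaseDrawToken + i, which is how inline uploads can be interleaved with draws at
    // execution time.)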
    GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();

    // Info about the op that is currently preparing or executing using the flush state or null if
    // an op is not currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    GrTokenTracker* fTokenTracker;
    GrGpuCommandBuffer* fCommandBuffer = nullptr;

    // Variables that are used to track where we are in lists as ops are executed
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;

    // Used to track the proxies that need to be deinstantiated after we finish a flush
    GrDeinstantiateProxyTracker fDeinstantiateProxyTracker;
};

#endif
  141. #endif