GrMtlResourceProvider.mm

/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/mtl/GrMtlResourceProvider.h"

#include "src/gpu/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/mtl/GrMtlGpu.h"
#include "src/gpu/mtl/GrMtlPipelineState.h"
#include "src/gpu/mtl/GrMtlUtil.h"

#include "src/sksl/SkSLCompiler.h"

#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif

GrMtlResourceProvider::GrMtlResourceProvider(GrMtlGpu* gpu)
    : fGpu(gpu) {
    fPipelineStateCache.reset(new PipelineStateCache(gpu));
    fBufferSuballocator.reset(new BufferSuballocator(gpu->device(), kBufferSuballocatorStartSize));
}

GrMtlPipelineState* GrMtlResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget, GrSurfaceOrigin origin,
        const GrPipeline& pipeline, const GrPrimitiveProcessor& proc,
        const GrTextureProxy* const primProcProxies[], GrPrimitiveType primType) {
    return fPipelineStateCache->refPipelineState(renderTarget, origin, proc, primProcProxies,
                                                 pipeline, primType);
}

////////////////////////////////////////////////////////////////////////////////////////////////
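
// Both the depth-stencil and sampler caches below follow the same find-or-create pattern:
// generate a key from the requested state, look it up in an SkTDynamicHash, and create and
// add the object on a miss. The cached objects are unreffed later in destroyResources().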
GrMtlDepthStencil* GrMtlResourceProvider::findOrCreateCompatibleDepthStencilState(
        const GrStencilSettings& stencil, GrSurfaceOrigin origin) {
    GrMtlDepthStencil* depthStencilState;
    GrMtlDepthStencil::Key key = GrMtlDepthStencil::GenerateKey(stencil, origin);
    depthStencilState = fDepthStencilStates.find(key);
    if (!depthStencilState) {
        depthStencilState = GrMtlDepthStencil::Create(fGpu, stencil, origin);
        fDepthStencilStates.add(depthStencilState);
    }
    SkASSERT(depthStencilState);
    return depthStencilState;
}

GrMtlSampler* GrMtlResourceProvider::findOrCreateCompatibleSampler(const GrSamplerState& params,
                                                                   uint32_t maxMipLevel) {
    GrMtlSampler* sampler;
    sampler = fSamplers.find(GrMtlSampler::GenerateKey(params, maxMipLevel));
    if (!sampler) {
        sampler = GrMtlSampler::Create(fGpu, params, maxMipLevel);
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    return sampler;
}

void GrMtlResourceProvider::destroyResources() {
    // Iterate through all stored GrMtlSamplers and unref them before resetting the hash.
    SkTDynamicHash<GrMtlSampler, GrMtlSampler::Key>::Iter samplerIter(&fSamplers);
    for (; !samplerIter.done(); ++samplerIter) {
        (*samplerIter).unref();
    }
    fSamplers.reset();

    // Iterate through all stored GrMtlDepthStencils and unref them before resetting the hash.
    SkTDynamicHash<GrMtlDepthStencil, GrMtlDepthStencil::Key>::Iter dsIter(&fDepthStencilStates);
    for (; !dsIter.done(); ++dsIter) {
        (*dsIter).unref();
    }
    fDepthStencilStates.reset();

    fPipelineStateCache->release();
}

////////////////////////////////////////////////////////////////////////////////////////////////

#ifdef GR_PIPELINE_STATE_CACHE_STATS
// Display pipeline state cache usage
static const bool c_DisplayMtlPipelineCache{false};
#endif
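
// PipelineStateCache caches compiled GrMtlPipelineStates, keyed on the
// GrMtlPipelineStateBuilder::Desc built in refPipelineState() below. Each Entry owns one
// pipeline state; the map is sized by kMaxEntries (declared alongside the cache in the header).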
struct GrMtlResourceProvider::PipelineStateCache::Entry {
    Entry(GrMtlGpu* gpu, GrMtlPipelineState* pipelineState)
            : fGpu(gpu)
            , fPipelineState(pipelineState) {}

    GrMtlGpu* fGpu;
    std::unique_ptr<GrMtlPipelineState> fPipelineState;
};

GrMtlResourceProvider::PipelineStateCache::PipelineStateCache(GrMtlGpu* gpu)
    : fMap(kMaxEntries)
    , fGpu(gpu)
#ifdef GR_PIPELINE_STATE_CACHE_STATS
    , fTotalRequests(0)
    , fCacheMisses(0)
#endif
{}

GrMtlResourceProvider::PipelineStateCache::~PipelineStateCache() {
    SkASSERT(0 == fMap.count());
    // dump stats
#ifdef GR_PIPELINE_STATE_CACHE_STATS
    if (c_DisplayMtlPipelineCache) {
        SkDebugf("--- Pipeline State Cache ---\n");
        SkDebugf("Total requests: %d\n", fTotalRequests);
        SkDebugf("Cache misses: %d\n", fCacheMisses);
        SkDebugf("Cache miss %%: %f\n", (fTotalRequests > 0) ?
                 100.f * fCacheMisses / fTotalRequests :
                 0.f);
        SkDebugf("---------------------\n");
    }
#endif
}
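
// Called from destroyResources() to drop every cached pipeline state. The destructor above
// asserts that the map is empty, so release() must run before this cache is destroyed.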
void GrMtlResourceProvider::PipelineStateCache::release() {
    fMap.reset();
}

GrMtlPipelineState* GrMtlResourceProvider::PipelineStateCache::refPipelineState(
        GrRenderTarget* renderTarget,
        GrSurfaceOrigin origin,
        const GrPrimitiveProcessor& primProc,
        const GrTextureProxy* const primProcProxies[],
        const GrPipeline& pipeline,
        GrPrimitiveType primType) {
#ifdef GR_PIPELINE_STATE_CACHE_STATS
    ++fTotalRequests;
#endif
    // Get GrMtlProgramDesc
    GrMtlPipelineStateBuilder::Desc desc;
    if (!GrMtlPipelineStateBuilder::Desc::Build(&desc, renderTarget, primProc, pipeline, primType,
                                                fGpu)) {
        GrCapsDebugf(fGpu->caps(), "Failed to build mtl program descriptor!\n");
        return nullptr;
    }
    // If we knew the shader won't depend on origin, we could skip this (and use the same program
    // for both origins). Instrumenting all fragment processors would be difficult and error prone.
    desc.setSurfaceOriginKey(GrGLSLFragmentShaderBuilder::KeyForSurfaceOrigin(origin));

    std::unique_ptr<Entry>* entry = fMap.find(desc);
    if (!entry) {
#ifdef GR_PIPELINE_STATE_CACHE_STATS
        ++fCacheMisses;
#endif
        GrMtlPipelineState* pipelineState(GrMtlPipelineStateBuilder::CreatePipelineState(
                fGpu, renderTarget, origin, primProc, primProcProxies, pipeline, &desc));
        if (nullptr == pipelineState) {
            return nullptr;
        }
        entry = fMap.insert(desc, std::unique_ptr<Entry>(new Entry(fGpu, pipelineState)));
        return (*entry)->fPipelineState.get();
    }
    return (*entry)->fPipelineState.get();
}

////////////////////////////////////////////////////////////////////////////////////////////////
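
// Allocates a CPU-visible Metal buffer for transient data. On macOS this uses the managed
// storage mode, so CPU-side writes have to be flushed to the GPU copy (typically with
// -didModifyRange:); elsewhere, shared storage gives a single allocation visible to both
// the CPU and GPU.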
static id<MTLBuffer> alloc_dynamic_buffer(id<MTLDevice> device, size_t size) {
    return [device newBufferWithLength: size
#ifdef SK_BUILD_FOR_MAC
                               options: MTLResourceStorageModeManaged];
#else
                               options: MTLResourceStorageModeShared];
#endif
}

// The idea here is that we create a ring buffer which is used for all dynamic allocations
// below a certain size. When a dynamic GrMtlBuffer is mapped, it grabs a portion of this
// buffer and uses it. On a subsequent map it will grab a different portion of the buffer.
// This prevents the buffer from overwriting itself before it's submitted to the command
// stream.
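//
// As an illustration (the numbers are arbitrary): with fTotalSize = 1024, fHead = 1800
// (modHead = 776) and fTail = 1500 (modTail = 476), bytes [476, 776) are still in flight.
// A 200-byte request fits at the end of the buffer, so getAllocation() returns offset 776
// and advances fHead to 2000; a 300-byte request does not fit at the end but does fit at
// the front, so the remaining [776, 1024) bytes are skipped over and it returns offset 0,
// leaving fHead at 2352.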
GrMtlResourceProvider::BufferSuballocator::BufferSuballocator(id<MTLDevice> device, size_t size)
        : fBuffer(alloc_dynamic_buffer(device, size))
        , fTotalSize(size)
        , fHead(0)
        , fTail(0) {
    // We increment fHead and fTail without bound and let overflow handle any wrapping.
    // Because of this, size needs to be a power of two.
    SkASSERT(SkIsPow2(size));
}

id<MTLBuffer> GrMtlResourceProvider::BufferSuballocator::getAllocation(size_t size,
                                                                       size_t* offset) {
    // capture current state locally (because fTail could be overwritten by the completion handler)
    size_t head, tail;
    SkAutoSpinlock lock(fMutex);
    head = fHead;
    tail = fTail;

    // The head and tail indices increment without bound, wrapping with overflow,
    // so we need to mod them down to the actual bounds of the allocation to determine
    // which blocks are available.
    size_t modHead = head & (fTotalSize - 1);
    size_t modTail = tail & (fTotalSize - 1);
    bool full = (head != tail && modHead == modTail);

    // We don't want large allocations to eat up this buffer, so we allocate them separately.
    if (full || size > fTotalSize/2) {
        return nil;
    }

    // case 1: free space lies at the beginning and/or the end of the buffer
    if (modHead >= modTail) {
        // check for room at the end
        if (fTotalSize - modHead < size) {
            // no room at the end, check the beginning
            if (modTail < size) {
                // no room at the beginning
                return nil;
            }
            // we are going to allocate from the beginning, adjust head to '0' position
            head += fTotalSize - modHead;
            modHead = 0;
        }
    // case 2: free space lies in the middle of the buffer, check for room there
    } else if (modTail - modHead < size) {
        // no room in the middle
        return nil;
    }

    *offset = modHead;
    // We're not sure what the usage of the next allocation will be --
    // to be safe we'll use 16 byte alignment.
    fHead = GrSizeAlignUp(head + size, 16);
    return fBuffer;
}
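
// Records the current fHead as the tail value to publish once the given command buffer
// completes, and takes a ref so the block's captured 'this' stays valid until then. When
// the GPU finishes the command buffer, the handler advances fTail, which releases everything
// allocated up to this point for reuse, and then drops the extra ref.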
void GrMtlResourceProvider::BufferSuballocator::addCompletionHandler(
        GrMtlCommandBuffer* cmdBuffer) {
    this->ref();
    SkAutoSpinlock lock(fMutex);
    size_t newTail = fHead;
    cmdBuffer->addCompletedHandler(^(id <MTLCommandBuffer>commandBuffer) {
        // Make sure SkAutoSpinlock goes out of scope before
        // the BufferSuballocator is potentially deleted.
        {
            SkAutoSpinlock lock(fMutex);
            fTail = newTail;
        }
        this->unref();
    });
}
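
// Hands out transient GPU-visible storage in three tiers: suballocate from the shared ring
// buffer if there is room; otherwise, if the ring buffer is still below
// kBufferSuballocatorMaxSize and the request would fit in half of a doubled buffer, replace
// it with one twice the size (the old buffer ages out once its pending command buffers
// complete) and retry; failing that, fall back to a dedicated one-off MTLBuffer at offset 0.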
id<MTLBuffer> GrMtlResourceProvider::getDynamicBuffer(size_t size, size_t* offset) {
    id<MTLBuffer> buffer = fBufferSuballocator->getAllocation(size, offset);
    if (buffer) {
        return buffer;
    }

    // Try to grow allocation (old allocation will age out).
    // We grow up to a maximum size, and only grow if the requested allocation will
    // fit into half of the new buffer (to prevent very large transient buffers forcing
    // growth when they'll never fit anyway).
    if (fBufferSuballocator->size() < kBufferSuballocatorMaxSize &&
        size <= fBufferSuballocator->size()) {
        fBufferSuballocator.reset(new BufferSuballocator(fGpu->device(),
                                                         2*fBufferSuballocator->size()));
        id<MTLBuffer> buffer = fBufferSuballocator->getAllocation(size, offset);
        if (buffer) {
            return buffer;
        }
    }

    *offset = 0;
    return alloc_dynamic_buffer(fGpu->device(), size);
}

void GrMtlResourceProvider::addBufferCompletionHandler(GrMtlCommandBuffer* cmdBuffer) {
    fBufferSuballocator->addCompletionHandler(cmdBuffer);
}