GrBufferAllocPool.cpp
/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/GrContext.h"
#include "include/gpu/GrTypes.h"
#include "include/private/SkMacros.h"
#include "src/core/SkSafeMath.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrCpuBuffer.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrGpuBuffer.h"
#include "src/gpu/GrResourceProvider.h"
sk_sp<GrBufferAllocPool::CpuBufferCache> GrBufferAllocPool::CpuBufferCache::Make(
        int maxBuffersToCache) {
    return sk_sp<CpuBufferCache>(new CpuBufferCache(maxBuffersToCache));
}

GrBufferAllocPool::CpuBufferCache::CpuBufferCache(int maxBuffersToCache)
        : fMaxBuffersToCache(maxBuffersToCache) {
    if (fMaxBuffersToCache) {
        fBuffers.reset(new Buffer[fMaxBuffersToCache]);
    }
}

sk_sp<GrCpuBuffer> GrBufferAllocPool::CpuBufferCache::makeBuffer(size_t size,
                                                                 bool mustBeInitialized) {
    SkASSERT(size > 0);
    Buffer* result = nullptr;
    if (size == kDefaultBufferSize) {
        int i = 0;
        for (; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
            SkASSERT(fBuffers[i].fBuffer->size() == kDefaultBufferSize);
            if (fBuffers[i].fBuffer->unique()) {
                result = &fBuffers[i];
            }
        }
        if (!result && i < fMaxBuffersToCache) {
            fBuffers[i].fBuffer = GrCpuBuffer::Make(size);
            result = &fBuffers[i];
        }
    }
    Buffer tempResult;
    if (!result) {
        tempResult.fBuffer = GrCpuBuffer::Make(size);
        result = &tempResult;
    }
    if (mustBeInitialized && !result->fCleared) {
        result->fCleared = true;
        memset(result->fBuffer->data(), 0, result->fBuffer->size());
    }
    return result->fBuffer;
}

void GrBufferAllocPool::CpuBufferCache::releaseAll() {
    for (int i = 0; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
        fBuffers[i].fBuffer.reset();
        fBuffers[i].fCleared = false;
    }
}
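
// Illustrative sketch (not part of the original file): a hypothetical owner, e.g. a
// context-level object, might share one CpuBufferCache across several pools so that
// default-sized CPU buffers are reused between flushes. The `gpu` pointer below is
// assumed to be a valid GrGpu*.
//
//     auto cpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(/*maxBuffersToCache=*/4);
//     GrVertexBufferAllocPool vertexPool(gpu, cpuBufferCache);
//     GrIndexBufferAllocPool indexPool(gpu, cpuBufferCache);
//     // ... reserve space from the pools while recording a flush ...
//     cpuBufferCache->releaseAll();  // drop the cached buffers, e.g. when freeing GPU resources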

//////////////////////////////////////////////////////////////////////////////

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

#define UNMAP_BUFFER(block)                                                          \
    do {                                                                             \
        TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer",       \
                             TRACE_EVENT_SCOPE_THREAD, "percent_unwritten",          \
                             (float)((block).fBytesFree) / (block).fBuffer->size()); \
        SkASSERT(!block.fBuffer->isCpuBuffer());                                     \
        static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap();                     \
    } while (false)

constexpr size_t GrBufferAllocPool::kDefaultBufferSize;

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType,
                                     sk_sp<CpuBufferCache> cpuBufferCache)
        : fBlocks(8)
        , fCpuBufferCache(std::move(cpuBufferCache))
        , fGpu(gpu)
        , fBufferType(bufferType) {}

void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.count()) {
        GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);
    VALIDATE();
}

void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        GrBuffer* buffer = block.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            } else {
                size_t flushSize = block.fBuffer->size() - block.fBytesFree;
                this->flushCpuData(fBlocks.back(), flushSize);
            }
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
            SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
        }
    } else if (!fBlocks.empty()) {
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        const GrBuffer* buffer = fBlocks[i].fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        GrBuffer* buffer = fBlocks[i].fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   sk_sp<const GrBuffer>* buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        SkSafeMath safeMath;
        size_t alignedSize = safeMath.add(pad, size);
        if (!safeMath.ok()) {
            return nullptr;
        }
        if (alignedSize <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= alignedSize;
            fBytesInUse += alignedSize;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request using a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}
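
// Illustrative sketch (not part of the original file): a subclass, or other code with
// access to these methods, could reserve `n` bytes aligned to `alignment` like this.
// `pool`, `n`, `alignment`, and `srcData` are assumed names, and the returned pointer
// is only valid until the pool is unmapped or reset.
//
//     sk_sp<const GrBuffer> buffer;
//     size_t offset;
//     if (void* space = pool.makeSpace(n, alignment, &buffer, &offset)) {
//         memcpy(space, srcData, n);  // the data lands at `offset` within `buffer`
//     }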

void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
                                          size_t fallbackSize,
                                          size_t alignment,
                                          sk_sp<const GrBuffer>* buffer,
                                          size_t* offset,
                                          size_t* actualSize) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);
    SkASSERT(actualSize);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        if ((minSize + pad) <= back.fBytesFree) {
            // Consume padding first, to make subsequent alignment math easier
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            back.fBytesFree -= pad;
            fBytesInUse += pad;

            // Give caller all remaining space in this block up to fallbackSize (but aligned
            // correctly)
            size_t size;
            if (back.fBytesFree >= fallbackSize) {
                SkASSERT(GrSizeAlignDown(fallbackSize, alignment) == fallbackSize);
                size = fallbackSize;
            } else {
                size = GrSizeAlignDown(back.fBytesFree, alignment);
            }
            *offset = usedBytes;
            *buffer = back.fBuffer;
            *actualSize = size;
            back.fBytesFree -= size;
            fBytesInUse += size;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request using a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(fallbackSize)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    *actualSize = fallbackSize;
    back.fBytesFree -= fallbackSize;
    fBytesInUse += fallbackSize;
    VALIDATE();
    return fBufferPtr;
}

void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->size() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we locked a vb to satisfy the make space and we're releasing
            // beyond it, then unmap it.
            GrBuffer* buffer = block.fBuffer.get();
            if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }

    VALIDATE();
}
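
// Illustrative sketch (not part of the original file): putBack() pairs naturally with
// makeSpaceAtLeast(). A hypothetical caller that over-reserves and then writes fewer
// bytes than it was granted can return the unused tail; `pool`, `minBytes`,
// `fallbackBytes`, `alignment`, and `fillData` are assumed names.
//
//     sk_sp<const GrBuffer> buffer;
//     size_t offset, actualBytes;
//     void* space = pool.makeSpaceAtLeast(minBytes, fallbackBytes, alignment,
//                                         &buffer, &offset, &actualBytes);
//     if (space) {
//         size_t bytesWritten = fillData(space, actualBytes);  // hypothetical helper
//         pool.putBack(actualBytes - bytesWritten);            // return the unused tail
//     }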

bool GrBufferAllocPool::createBlock(size_t requestSize) {
    size_t size = SkTMax(requestSize, kDefaultBufferSize);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->size();
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        GrBuffer* buffer = prev.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(prev);
            } else {
                this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
            }
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    if (block.fBuffer->isCpuBuffer()) {
        fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
        SkASSERT(fBufferPtr);
    } else {
        if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
            size > fGpu->caps()->bufferMapThreshold()) {
            fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
        }
    }
    if (!fBufferPtr) {
        this->resetCpuData(block.fBytesFree);
        fBufferPtr = fCpuStagingBuffer->data();
    }

    VALIDATE(true);

    return true;
}

void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());
    SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
             !static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

void GrBufferAllocPool::resetCpuData(size_t newSize) {
    SkASSERT(newSize >= kDefaultBufferSize || !newSize);
    if (!newSize) {
        fCpuStagingBuffer.reset();
        return;
    }
    if (fCpuStagingBuffer && newSize <= fCpuStagingBuffer->size()) {
        return;
    }
    bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
    fCpuStagingBuffer = fCpuBufferCache ? fCpuBufferCache->makeBuffer(newSize, mustInitialize)
                                        : GrCpuBuffer::Make(newSize);
}

void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    SkASSERT(block.fBuffer.get());
    SkASSERT(!block.fBuffer.get()->isCpuBuffer());
    GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
    SkASSERT(flushSize <= buffer->size());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fGpu->caps()->bufferMapThreshold()) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
    auto resourceProvider = fGpu->getContext()->priv().resourceProvider();

    if (fGpu->caps()->preferClientSideDynamicBuffers()) {
        bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
        return fCpuBufferCache ? fCpuBufferCache->makeBuffer(size, mustInitialize)
                               : GrCpuBuffer::Make(size);
    }
    return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern);
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kVertex, std::move(cpuBufferCache)) {}

void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         sk_sp<const GrBuffer>* buffer,
                                         int* startVertex) {
    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
                                     vertexSize,
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
                                                int fallbackVertexCount,
                                                sk_sp<const GrBuffer>* buffer, int* startVertex,
                                                int* actualVertexCount) {
    SkASSERT(minVertexCount >= 0);
    SkASSERT(fallbackVertexCount >= minVertexCount);
    SkASSERT(buffer);
    SkASSERT(startVertex);
    SkASSERT(actualVertexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
                                            SkSafeMath::Mul(vertexSize, fallbackVertexCount),
                                            vertexSize,
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);

    SkASSERT(0 == actualSize % vertexSize);
    SkASSERT(actualSize >= vertexSize * minVertexCount);
    *actualVertexCount = static_cast<int>(actualSize / vertexSize);

    return ptr;
}
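
// Illustrative sketch (not part of the original file): a hypothetical op recording
// `numVerts` vertices of `stride` bytes each could use the vertex pool like this;
// `vertexPool`, `stride`, and `numVerts` are assumed names.
//
//     sk_sp<const GrBuffer> vertexBuffer;
//     int startVertex;
//     void* verts = vertexPool.makeSpace(stride, numVerts, &vertexBuffer, &startVertex);
//     if (!verts) {
//         return;  // allocation failed; skip the draw
//     }
//     // Write numVerts * stride bytes into verts; the draw then references
//     // vertexBuffer with a base vertex of startVertex.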

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kIndex, std::move(cpuBufferCache)) {}

void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
                                        int* startIndex) {
    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
                                     sizeof(uint16_t),
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                               sk_sp<const GrBuffer>* buffer, int* startIndex,
                                               int* actualIndexCount) {
    SkASSERT(minIndexCount >= 0);
    SkASSERT(fallbackIndexCount >= minIndexCount);
    SkASSERT(buffer);
    SkASSERT(startIndex);
    SkASSERT(actualIndexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
                                            SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
                                            sizeof(uint16_t),
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));

    SkASSERT(0 == actualSize % sizeof(uint16_t));
    SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
    *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
    return ptr;
}
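
// Illustrative sketch (not part of the original file): indices here are 16-bit, so a
// hypothetical caller that needs at least `minIndices` but can use up to
// `fallbackIndices` could reserve space like this; `indexPool`, `minIndices`, and
// `fallbackIndices` are assumed names.
//
//     sk_sp<const GrBuffer> indexBuffer;
//     int startIndex, actualIndexCount;
//     auto* indices = static_cast<uint16_t*>(indexPool.makeSpaceAtLeast(
//             minIndices, fallbackIndices, &indexBuffer, &startIndex, &actualIndexCount));
//     if (indices) {
//         // Write up to actualIndexCount uint16_t values starting at indices[0];
//         // the draw references indexBuffer with a base index of startIndex.
//     }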