GrMemoryPool.cpp

/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/private/SkMalloc.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/ops/GrOp.h"

#ifdef SK_DEBUG
    #include <atomic>
#endif

#ifdef SK_DEBUG
    #define VALIDATE this->validate()
#else
    #define VALIDATE
#endif

void GrOpMemoryPool::release(std::unique_ptr<GrOp> op) {
    GrOp* tmp = op.release();
    SkASSERT(tmp);
    tmp->~GrOp();
    fMemoryPool.release(tmp);
}
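
// Usage sketch (illustrative only; the real op-creation helpers live in
// GrMemoryPool.h and the GrOp factories): the pool hands out raw storage, so
// an op is placement-new'd into pool memory, and release() above runs the
// destructor before handing the storage back. Roughly:
//
//     void* mem = pool->allocate(sizeof(SomeOp));               // raw storage
//     std::unique_ptr<GrOp> op(new (mem) SomeOp(/* args */));   // construct in place
//     ...
//     pool->release(std::move(op));                             // ~GrOp(), then free the storage
//
// SomeOp is a hypothetical GrOp subclass used only for illustration.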

constexpr size_t GrMemoryPool::kSmallestMinAllocSize;

GrMemoryPool::GrMemoryPool(size_t preallocSize, size_t minAllocSize) {
    SkDEBUGCODE(fAllocationCnt = 0);
    SkDEBUGCODE(fAllocBlockCnt = 0);

    minAllocSize = SkTMax<size_t>(GrSizeAlignUp(minAllocSize, kAlignment), kSmallestMinAllocSize);
    preallocSize = SkTMax<size_t>(GrSizeAlignUp(preallocSize, kAlignment), minAllocSize);

    fMinAllocSize = minAllocSize;
    fSize = 0;

    fHead = CreateBlock(preallocSize);
    fTail = fHead;
    fHead->fNext = nullptr;
    fHead->fPrev = nullptr;
    VALIDATE;
}
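
// Worked example of the rounding above (constant values are illustrative; the
// real kAlignment and kSmallestMinAllocSize are defined in GrMemoryPool.h).
// Suppose kAlignment == 8 and kSmallestMinAllocSize == 1024; then
//
//     GrMemoryPool pool(/*preallocSize=*/64, /*minAllocSize=*/100);
//
// yields
//
//     minAllocSize  = max(GrSizeAlignUp(100, 8), 1024) = max(104, 1024) = 1024
//     preallocSize  = max(GrSizeAlignUp(64, 8),  1024) = max(64, 1024)  = 1024
//
// so both sizes are aligned up and never smaller than the per-block minimum,
// and the preallocated head block is at least kSmallestMinAllocSize bytes.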

GrMemoryPool::~GrMemoryPool() {
    VALIDATE;
#ifdef SK_DEBUG
    int i = 0;
    int n = fAllocatedIDs.count();
    fAllocatedIDs.foreach([&i, n] (int32_t id) {
        if (++i == 1) {
            SkDebugf("Leaked IDs (in no particular order): %d", id);
        } else if (i < 11) {
            SkDebugf(", %d%s", id, (n == i ? "\n" : ""));
        } else if (i == 11) {
            SkDebugf(", ...\n");
        }
    });
#endif
    SkASSERT(0 == fAllocationCnt);
    SkASSERT(fHead == fTail);
    SkASSERT(0 == fHead->fLiveCount);
    DeleteBlock(fHead);
}

void* GrMemoryPool::allocate(size_t size) {
    VALIDATE;
    size += kPerAllocPad;
    size = GrSizeAlignUp(size, kAlignment);
    if (fTail->fFreeSize < size) {
        size_t blockSize = size + kHeaderSize;
        blockSize = SkTMax<size_t>(blockSize, fMinAllocSize);
        BlockHeader* block = CreateBlock(blockSize);
        block->fPrev = fTail;
        block->fNext = nullptr;
        SkASSERT(nullptr == fTail->fNext);
        fTail->fNext = block;
        fTail = block;
        fSize += block->fSize;
        SkDEBUGCODE(++fAllocBlockCnt);
    }
    SkASSERT(kAssignedMarker == fTail->fBlockSentinal);
    SkASSERT(fTail->fFreeSize >= size);
    intptr_t ptr = fTail->fCurrPtr;
    // We stash a pointer to the block header, just before the allocated space,
    // so that we can decrement the live count on delete in constant time.
    AllocHeader* allocData = reinterpret_cast<AllocHeader*>(ptr);
    SkDEBUGCODE(allocData->fSentinal = kAssignedMarker);
    SkDEBUGCODE(allocData->fID = []{
        static std::atomic<int32_t> nextID{1};
        return nextID++;
    }());
    // You can set a breakpoint here when a leaked ID is allocated to see the stack frame.
    SkDEBUGCODE(fAllocatedIDs.add(allocData->fID));
    allocData->fHeader = fTail;
    ptr += kPerAllocPad;
    fTail->fPrevPtr = fTail->fCurrPtr;
    fTail->fCurrPtr += size;
    fTail->fFreeSize -= size;
    fTail->fLiveCount += 1;
    SkDEBUGCODE(++fAllocationCnt);
    VALIDATE;
    return reinterpret_cast<void*>(ptr);
}
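
// Layout of a single allocation as produced above (widths not to scale;
// kPerAllocPad and kHeaderSize are defined in GrMemoryPool.h):
//
//     fCurrPtr (before the call)
//     v
//     [ AllocHeader, padded to kPerAllocPad ][ user data, aligned to kAlignment ]
//                                            ^
//                                            pointer returned to the caller
//
// release(void*) below subtracts kPerAllocPad from the user pointer to recover
// the AllocHeader and, through allocData->fHeader, the owning BlockHeader.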

void GrMemoryPool::release(void* p) {
    VALIDATE;
    intptr_t ptr = reinterpret_cast<intptr_t>(p) - kPerAllocPad;
    AllocHeader* allocData = reinterpret_cast<AllocHeader*>(ptr);
    SkASSERT(kAssignedMarker == allocData->fSentinal);
    SkDEBUGCODE(allocData->fSentinal = kFreedMarker);
    SkDEBUGCODE(fAllocatedIDs.remove(allocData->fID));
    BlockHeader* block = allocData->fHeader;
    SkASSERT(kAssignedMarker == block->fBlockSentinal);
    if (1 == block->fLiveCount) {
        // The head block is special: it is reset rather than deleted.
        if (fHead == block) {
            fHead->fCurrPtr = reinterpret_cast<intptr_t>(fHead) + kHeaderSize;
            fHead->fLiveCount = 0;
            fHead->fFreeSize = fHead->fSize - kHeaderSize;
        } else {
            BlockHeader* prev = block->fPrev;
            BlockHeader* next = block->fNext;
            SkASSERT(prev);
            prev->fNext = next;
            if (next) {
                next->fPrev = prev;
            } else {
                SkASSERT(fTail == block);
                fTail = prev;
            }
            fSize -= block->fSize;
            DeleteBlock(block);
            SkDEBUGCODE(fAllocBlockCnt--);
        }
    } else {
        --block->fLiveCount;
        // Trivial reclaim: if we're releasing the most recent allocation, reuse it.
        if (block->fPrevPtr == ptr) {
            block->fFreeSize += (block->fCurrPtr - block->fPrevPtr);
            block->fCurrPtr = block->fPrevPtr;
        }
    }
    SkDEBUGCODE(--fAllocationCnt);
    VALIDATE;
}
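
// Worked trace of the reclaim paths above (a minimal sketch; the 16-byte
// requests are arbitrary):
//
//     void* a = pool.allocate(16);
//     void* b = pool.allocate(16);   // fPrevPtr now records where b's header begins
//     pool.release(b);               // b is the most recent allocation, so fCurrPtr
//                                    // rewinds and b's bytes are immediately reusable
//     pool.release(a);               // last live allocation in the block: the head
//                                    // block is reset, any other block is deleted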

GrMemoryPool::BlockHeader* GrMemoryPool::CreateBlock(size_t blockSize) {
    blockSize = SkTMax<size_t>(blockSize, kHeaderSize);
    BlockHeader* block =
            reinterpret_cast<BlockHeader*>(sk_malloc_throw(blockSize));
    // we assume malloc gives us aligned memory
    SkASSERT(!(reinterpret_cast<intptr_t>(block) % kAlignment));
    SkDEBUGCODE(block->fBlockSentinal = kAssignedMarker);
    block->fLiveCount = 0;
    block->fFreeSize = blockSize - kHeaderSize;
    block->fCurrPtr = reinterpret_cast<intptr_t>(block) + kHeaderSize;
    block->fPrevPtr = 0; // gcc warns on assigning nullptr to an intptr_t.
    block->fSize = blockSize;
    return block;
}
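
// Resulting block layout (a sketch; kHeaderSize is roughly sizeof(BlockHeader)
// rounded up to kAlignment, see GrMemoryPool.h for the exact definition):
//
//     [ BlockHeader : kHeaderSize ][ allocation 0 ][ allocation 1 ] ... [ free ]
//     ^                            ^                                   ^
//     block                        fCurrPtr starts here                fCurrPtr after
//                                                                      the allocations
//
// fFreeSize tracks the unused tail of the block, and fPrevPtr the start of the
// most recent allocation's header (0 while the block is empty).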

void GrMemoryPool::DeleteBlock(BlockHeader* block) {
    SkASSERT(kAssignedMarker == block->fBlockSentinal);
    SkDEBUGCODE(block->fBlockSentinal = kFreedMarker); // FWIW
    sk_free(block);
}

void GrMemoryPool::validate() {
#ifdef SK_DEBUG
    BlockHeader* block = fHead;
    BlockHeader* prev = nullptr;
    SkASSERT(block);
    int allocCount = 0;
    do {
        SkASSERT(kAssignedMarker == block->fBlockSentinal);
        allocCount += block->fLiveCount;
        SkASSERT(prev == block->fPrev);
        if (prev) {
            SkASSERT(prev->fNext == block);
        }
        intptr_t b = reinterpret_cast<intptr_t>(block);
        size_t ptrOffset = block->fCurrPtr - b;
        size_t totalSize = ptrOffset + block->fFreeSize;
        intptr_t userStart = b + kHeaderSize;
        SkASSERT(!(b % kAlignment));
        SkASSERT(!(totalSize % kAlignment));
        SkASSERT(!(block->fCurrPtr % kAlignment));
        if (fHead != block) {
            SkASSERT(block->fLiveCount);
            SkASSERT(totalSize >= fMinAllocSize);
        } else {
            SkASSERT(totalSize == block->fSize);
        }
        if (!block->fLiveCount) {
            SkASSERT(ptrOffset == kHeaderSize);
            SkASSERT(userStart == block->fCurrPtr);
        } else {
            AllocHeader* allocData = reinterpret_cast<AllocHeader*>(userStart);
            SkASSERT(allocData->fSentinal == kAssignedMarker ||
                     allocData->fSentinal == kFreedMarker);
            SkASSERT(block == allocData->fHeader);
        }
        prev = block;
    } while ((block = block->fNext));
    SkASSERT(allocCount == fAllocationCnt);
    SkASSERT(fAllocationCnt == fAllocatedIDs.count());
    SkASSERT(prev == fTail);
    SkASSERT(fAllocBlockCnt != 0 || fSize == 0);
#endif
}