SkRWBuffer.cpp

/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkRWBuffer.h"

#include "include/core/SkStream.h"
#include "include/private/SkMalloc.h"
#include "include/private/SkTo.h"
#include "src/core/SkMakeUnique.h"

#include <atomic>
#include <new>

// Force small chunks to be a page's worth
static const size_t kMinAllocSize = 4096;
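
// A block is a single heap allocation: the SkBufferBlock header is immediately followed by
// fCapacity bytes of storage (see startData()). Blocks form a singly-linked list; fNext and
// fUsed are written only by the writer, while readers rely solely on fCapacity.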
struct SkBufferBlock {
    SkBufferBlock*  fNext;      // updated by the writer
    size_t          fUsed;      // updated by the writer
    const size_t    fCapacity;

    SkBufferBlock(size_t capacity) : fNext(nullptr), fUsed(0), fCapacity(capacity) {}

    const void* startData() const { return this + 1; }

    size_t avail() const { return fCapacity - fUsed; }
    void* availData() { return (char*)this->startData() + fUsed; }

    static SkBufferBlock* Alloc(size_t length) {
        size_t capacity = LengthToCapacity(length);
        void* buffer = sk_malloc_throw(sizeof(SkBufferBlock) + capacity);
        return new (buffer) SkBufferBlock(capacity);
    }

    // Return number of bytes actually appended. Important that we always completely fill this
    // block before spilling into the next, since the reader uses fCapacity to know how many it
    // can read.
    //
    size_t append(const void* src, size_t length) {
        this->validate();
        size_t amount = SkTMin(this->avail(), length);
        memcpy(this->availData(), src, amount);
        fUsed += amount;
        this->validate();
        return amount;
    }

    // Do not call in the reader thread, since the writer may be updating fUsed.
    // (The assertion is still true, but TSAN still may complain about its raciness.)
    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fCapacity > 0);
        SkASSERT(fUsed <= fCapacity);
#endif
    }

private:
    static size_t LengthToCapacity(size_t length) {
        const size_t minSize = kMinAllocSize - sizeof(SkBufferBlock);
        return SkTMax(length, minSize);
    }
};
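
// The shared, reference-counted head of the block chain. The first block (and its inline
// storage) lives in the same allocation as the head; the final unref() frees the head and then
// walks and frees the remaining blocks.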
struct SkBufferHead {
    mutable std::atomic<int32_t> fRefCnt;
    SkBufferBlock   fBlock;

    SkBufferHead(size_t capacity) : fRefCnt(1), fBlock(capacity) {}

    static size_t LengthToCapacity(size_t length) {
        const size_t minSize = kMinAllocSize - sizeof(SkBufferHead);
        return SkTMax(length, minSize);
    }

    static SkBufferHead* Alloc(size_t length) {
        size_t capacity = LengthToCapacity(length);
        size_t size = sizeof(SkBufferHead) + capacity;
        void* buffer = sk_malloc_throw(size);
        return new (buffer) SkBufferHead(capacity);
    }

    void ref() const {
        SkAssertResult(fRefCnt.fetch_add(+1, std::memory_order_relaxed));
    }

    void unref() const {
        // A release here acts in place of all releases we "should" have been doing in ref().
        int32_t oldRefCnt = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
        SkASSERT(oldRefCnt);
        if (1 == oldRefCnt) {
            // Like unique(), the acquire is only needed on success.
            SkBufferBlock* block = fBlock.fNext;
            sk_free((void*)this);
            while (block) {
                SkBufferBlock* next = block->fNext;
                sk_free(block);
                block = next;
            }
        }
    }

    void validate(size_t minUsed, const SkBufferBlock* tail = nullptr) const {
#ifdef SK_DEBUG
        SkASSERT(fRefCnt.load(std::memory_order_relaxed) > 0);
        size_t totalUsed = 0;
        const SkBufferBlock* block = &fBlock;
        const SkBufferBlock* lastBlock = block;
        while (block) {
            block->validate();
            totalUsed += block->fUsed;
            lastBlock = block;
            block = block->fNext;
        }
        SkASSERT(minUsed <= totalUsed);
        if (tail) {
            SkASSERT(tail == lastBlock);
        }
#endif
    }
};

///////////////////////////////////////////////////////////////////////////////////////////////////

// The reader can only access block.fCapacity (which never changes), and cannot access
// block.fUsed, which may be updated by the writer.
//
SkROBuffer::SkROBuffer(const SkBufferHead* head, size_t available, const SkBufferBlock* tail)
    : fHead(head), fAvailable(available), fTail(tail)
{
    if (head) {
        fHead->ref();
        SkASSERT(available > 0);
        head->validate(available, tail);
    } else {
        SkASSERT(0 == available);
        SkASSERT(!tail);
    }
}

SkROBuffer::~SkROBuffer() {
    if (fHead) {
        fHead->unref();
    }
}
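
// Iter walks the snapshot's block list. Each block contributes min(fCapacity, bytes remaining)
// bytes; iteration ends when the remaining count reaches zero or when the block that was the
// tail at snapshot time has been consumed.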
SkROBuffer::Iter::Iter(const SkROBuffer* buffer) {
    this->reset(buffer);
}

SkROBuffer::Iter::Iter(const sk_sp<SkROBuffer>& buffer) {
    this->reset(buffer.get());
}

void SkROBuffer::Iter::reset(const SkROBuffer* buffer) {
    fBuffer = buffer;
    if (buffer && buffer->fHead) {
        fBlock = &buffer->fHead->fBlock;
        fRemaining = buffer->fAvailable;
    } else {
        fBlock = nullptr;
        fRemaining = 0;
    }
}

const void* SkROBuffer::Iter::data() const {
    return fRemaining ? fBlock->startData() : nullptr;
}

size_t SkROBuffer::Iter::size() const {
    if (!fBlock) {
        return 0;
    }
    return SkTMin(fBlock->fCapacity, fRemaining);
}

bool SkROBuffer::Iter::next() {
    if (fRemaining) {
        fRemaining -= this->size();
        if (fBuffer->fTail == fBlock) {
            // There are more blocks, but fBuffer does not know about them.
            SkASSERT(0 == fRemaining);
            fBlock = nullptr;
        } else {
            fBlock = fBlock->fNext;
        }
    }
    return fRemaining != 0;
}

///////////////////////////////////////////////////////////////////////////////////////////////////
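
// SkRWBuffer is the writer-side object: a single writer appends bytes, allocating the head
// lazily and growing the block chain as needed, and hands out immutable SkROBuffer snapshots
// of what has been written so far.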
SkRWBuffer::SkRWBuffer(size_t initialCapacity) : fHead(nullptr), fTail(nullptr), fTotalUsed(0) {
    if (initialCapacity) {
        fHead = SkBufferHead::Alloc(initialCapacity);
        fTail = &fHead->fBlock;
    }
}

SkRWBuffer::~SkRWBuffer() {
    this->validate();
    if (fHead) {
        fHead->unref();
    }
}

// It is important that we always completely fill the current block before spilling over to the
// next, since our reader will be using fCapacity (min'd against its total available) to know how
// many bytes to read from a given block.
//
void SkRWBuffer::append(const void* src, size_t length, size_t reserve) {
    this->validate();
    if (0 == length) {
        return;
    }

    fTotalUsed += length;

    if (nullptr == fHead) {
        fHead = SkBufferHead::Alloc(length + reserve);
        fTail = &fHead->fBlock;
    }

    size_t written = fTail->append(src, length);
    SkASSERT(written <= length);
    src = (const char*)src + written;
    length -= written;

    if (length) {
        SkBufferBlock* block = SkBufferBlock::Alloc(length + reserve);
        fTail->fNext = block;
        fTail = block;
        written = fTail->append(src, length);
        SkASSERT(written == length);
    }
    this->validate();
}

#ifdef SK_DEBUG
void SkRWBuffer::validate() const {
    if (fHead) {
        fHead->validate(fTotalUsed, fTail);
    } else {
        SkASSERT(nullptr == fTail);
        SkASSERT(0 == fTotalUsed);
    }
}
#endif

///////////////////////////////////////////////////////////////////////////////////////////////////
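
// Adapts an SkROBuffer snapshot to the SkStreamAsset interface, tracking an offset within the
// current block (fLocalOffset) and an offset within the whole buffer (fGlobalOffset).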
class SkROBufferStreamAsset : public SkStreamAsset {
    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fGlobalOffset <= fBuffer->size());
        SkASSERT(fLocalOffset <= fIter.size());
        SkASSERT(fLocalOffset <= fGlobalOffset);
#endif
    }

#ifdef SK_DEBUG
    class AutoValidate {
        SkROBufferStreamAsset* fStream;
    public:
        AutoValidate(SkROBufferStreamAsset* stream) : fStream(stream) { stream->validate(); }
        ~AutoValidate() { fStream->validate(); }
    };
    #define AUTO_VALIDATE   AutoValidate av(this);
#else
    #define AUTO_VALIDATE
#endif

public:
    SkROBufferStreamAsset(sk_sp<SkROBuffer> buffer) : fBuffer(std::move(buffer)), fIter(fBuffer) {
        fGlobalOffset = fLocalOffset = 0;
    }

    size_t getLength() const override { return fBuffer->size(); }

    bool rewind() override {
        AUTO_VALIDATE
        fIter.reset(fBuffer.get());
        fGlobalOffset = fLocalOffset = 0;
        return true;
    }

    size_t read(void* dst, size_t request) override {
        AUTO_VALIDATE
        size_t bytesRead = 0;
        for (;;) {
            size_t size = fIter.size();
            SkASSERT(fLocalOffset <= size);
            size_t avail = SkTMin(size - fLocalOffset, request - bytesRead);
            if (dst) {
                memcpy(dst, (const char*)fIter.data() + fLocalOffset, avail);
                dst = (char*)dst + avail;
            }
            bytesRead += avail;
            fLocalOffset += avail;
            SkASSERT(bytesRead <= request);
            if (bytesRead == request) {
                break;
            }
            // If we get here, we've exhausted the current iter
            SkASSERT(fLocalOffset == size);
            fLocalOffset = 0;
            if (!fIter.next()) {
                break;  // ran out of data
            }
        }
        fGlobalOffset += bytesRead;
        SkASSERT(fGlobalOffset <= fBuffer->size());
        return bytesRead;
    }

    bool isAtEnd() const override {
        return fBuffer->size() == fGlobalOffset;
    }

    size_t getPosition() const override {
        return fGlobalOffset;
    }

    bool seek(size_t position) override {
        AUTO_VALIDATE
        if (position < fGlobalOffset) {
            this->rewind();
        }
        (void)this->skip(position - fGlobalOffset);
        return true;
    }

    bool move(long offset) override {
        AUTO_VALIDATE
        offset += fGlobalOffset;
        if (offset <= 0) {
            this->rewind();
        } else {
            (void)this->seek(SkToSizeT(offset));
        }
        return true;
    }

private:
    SkStreamAsset* onDuplicate() const override {
        return new SkROBufferStreamAsset(fBuffer);
    }

    SkStreamAsset* onFork() const override {
        auto clone = this->duplicate();
        clone->seek(this->getPosition());
        return clone.release();
    }

    sk_sp<SkROBuffer>   fBuffer;
    SkROBuffer::Iter    fIter;
    size_t              fLocalOffset;
    size_t              fGlobalOffset;
};

std::unique_ptr<SkStreamAsset> SkRWBuffer::makeStreamSnapshot() const {
    return skstd::make_unique<SkROBufferStreamAsset>(this->makeROBufferSnapshot());
}
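
// A minimal usage sketch of the writer/snapshot flow (illustrative only; data, dataLength, and
// consume are placeholder names, not part of this file):
//
//     SkRWBuffer buffer;
//     buffer.append(data, dataLength, 0);                      // writer thread appends bytes
//     sk_sp<SkROBuffer> snapshot = buffer.makeROBufferSnapshot();
//     for (SkROBuffer::Iter iter(snapshot); iter.data(); iter.next()) {
//         consume(iter.data(), iter.size());                   // reader walks the snapshot's blocks
//     }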