/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCPathCache.h"

#include "include/private/SkNx.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrProxyProvider.h"

static constexpr int kMaxKeyDataCountU32 = 256;  // 1kB of uint32_t's.

DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);
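
// Returns a unique, non-zero ID for a new path cache. SK_InvalidUniqueID is skipped in case the
// 32-bit counter ever wraps around.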
static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}
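
// SkMessageBus filter: an invalidated-key message is only delivered to the inbox of the path
// cache whose unique ID is baked into the key.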
static inline bool SkShouldPostMessageToBus(
        const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
    return key->pathCacheUniqueID() == msgBusUniqueID;
}

// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;
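
// Captures the 2x2 portion of the view matrix along with its subpixel translation, and reports
// the integer portion of the translation via 'shift' so masks can be cached independently of it.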
GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
        : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
    SkASSERT(!m.hasPerspective());
    Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
    Sk2f transFloor;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    // On Android framework we pre-round view matrix translates to integers for better caching.
    transFloor = translate;
#else
    transFloor = translate.floor();
    (translate - transFloor).store(fSubpixelTranslate);
#endif
    shift->set((int)transFloor[0], (int)transFloor[1]);
    SkASSERT((float)shift->fX == transFloor[0]);  // Make sure transFloor had integer values.
    SkASSERT((float)shift->fY == transFloor[1]);
}
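
// Two mask transforms are equivalent if their 2x2 matrices match exactly and their subpixel
// translates differ by less than 1/256 of a pixel. (On the Android framework the subpixel check
// is skipped because translates are pre-rounded to integers.)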
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                const GrCCPathCache::MaskTransform& b) {
    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
        return false;
    }
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (((Sk2f::Load(a.fSubpixelTranslate) -
          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
        return false;
    }
#endif
    return true;
}
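
// A Key and its variable-length key data are allocated as one block: 'dataCountU32' uint32_t's of
// data live immediately after the Key object itself.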
sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
                                                   int dataCountU32, const void* data) {
    void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
    sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
    if (data) {
        memcpy(key->data(), data, key->dataSizeInBytes());
    }
    return key;
}

const uint32_t* GrCCPathCache::Key::data() const {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
}

uint32_t* GrCCPathCache::Key::data() {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
}

void GrCCPathCache::Key::onChange() {
    // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
    SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
}

GrCCPathCache::GrCCPathCache(uint32_t contextUniqueID)
        : fContextUniqueID(contextUniqueID)
        , fInvalidatedKeysInbox(next_path_cache_id())
        , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
}

GrCCPathCache::~GrCCPathCache() {
    while (!fLRU.isEmpty()) {
        this->evict(*fLRU.tail()->fCacheKey, fLRU.tail());
    }
    SkASSERT(0 == fHashTable.count());  // Ensure the hash table and LRU list were coherent.

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    // We just purge via message bus since we don't have any access to the resource cache right now.
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(proxy->getUniqueKey(), fContextUniqueID));
    }
    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(key, fContextUniqueID));
    }
}

namespace {

// Produces a key that accounts both for a shape's path geometry, as well as any stroke/style.
class WriteKeyHelper {
public:
    static constexpr int kStrokeWidthIdx = 0;
    static constexpr int kStrokeMiterIdx = 1;
    static constexpr int kStrokeCapJoinIdx = 2;
    static constexpr int kShapeUnstyledKeyIdx = 3;

    WriteKeyHelper(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}

    // Returns the total number of uint32_t's to allocate for the key.
    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }

    // Writes the key data to out[].
    void write(const GrShape& shape, uint32_t* out) {
        // Stroke key.
        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
        // http://skbug.com/8273
        SkASSERT(!shape.style().hasPathEffect());
        const SkStrokeRec& stroke = shape.style().strokeRec();
        if (stroke.isFillStyle()) {
            // Use a value for width that won't collide with a valid fp32 value >= 0.
            out[kStrokeWidthIdx] = ~0;
            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
        } else {
            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
        }

        // Shape unstyled key.
        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
    }

private:
    int fShapeUnstyledKeyCount;
};

}  // namespace
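
// Looks up a cache entry for the given shape and view matrix. On a hit whose mask transform is
// incompatible with the new matrix, the entry is recycled if we hold the only reference, or
// evicted otherwise. On a miss, a new entry is created (evicting the LRU tail first if the cache
// is full).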
GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(
        GrOnFlushResourceProvider* onFlushRP, const GrShape& shape,
        const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, SkIVector* maskShift) {
    if (!shape.hasUnstyledKey()) {
        return OnFlushEntryRef();
    }

    WriteKeyHelper writeKeyHelper(shape);
    if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
        return OnFlushEntryRef();
    }

    SkASSERT(fScratchKey->unique());
    fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
    writeKeyHelper.write(shape, fScratchKey->data());

    MaskTransform m(viewMatrix, maskShift);
    GrCCPathCacheEntry* entry = nullptr;
    if (HashNode* node = fHashTable.find(*fScratchKey)) {
        entry = node->entry();
        SkASSERT(fLRU.isInList(entry));
        if (!fuzzy_equals(m, entry->fMaskTransform)) {
            // The path was reused with an incompatible matrix.
            if (entry->unique()) {
                // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
                SkASSERT(0 == entry->fOnFlushRefCnt);  // Because we are unique.
                entry->fMaskTransform = m;
                entry->fHitCount = 0;
                entry->fHitRect = SkIRect::MakeEmpty();
                entry->releaseCachedAtlas(this);
            } else {
                this->evict(*fScratchKey);
                entry = nullptr;
            }
        }
    }

    if (!entry) {
        if (fHashTable.count() >= kMaxCacheCount) {
            SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
            SkASSERT(node && node->entry() == fLRU.tail());
            this->evict(*fLRU.tail()->fCacheKey);  // We've exceeded our limit.
        }

        // Create a new entry in the cache.
        sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
                                            writeKeyHelper.allocCountU32(), fScratchKey->data());
        SkASSERT(*permanentKey == *fScratchKey);
        SkASSERT(!fHashTable.find(*permanentKey));
        entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();
        SkASSERT(fHashTable.count() <= kMaxCacheCount);
    } else {
        fLRU.remove(entry);  // Will be re-added at head.
    }

    SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
    SkASSERT(node && node->entry() == entry);
    fLRU.addToHead(entry);

    if (0 == entry->fOnFlushRefCnt) {
        // Only update the time stamp and hit count if we haven't seen this entry yet during the
        // current flush.
        entry->fTimestamp = this->quickPerFlushTimestamp();
        ++entry->fHitCount;

        if (entry->fCachedAtlas) {
            SkASSERT(SkToBool(entry->fCachedAtlas->peekOnFlushRefCnt()) ==
                     SkToBool(entry->fCachedAtlas->getOnFlushProxy()));
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                if (sk_sp<GrTextureProxy> onFlushProxy = onFlushRP->findOrCreateProxyByUniqueKey(
                            entry->fCachedAtlas->textureKey(), GrCCAtlas::kTextureOrigin)) {
                    onFlushProxy->priv().setIgnoredByResourceAllocator();
                    entry->fCachedAtlas->setOnFlushProxy(std::move(onFlushProxy));
                }
            }
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                // Our atlas's backing texture got purged from the GrResourceCache. Release the
                // cached atlas.
                entry->releaseCachedAtlas(this);
            }
        }
    }
    entry->fHitRect.join(clippedDrawBounds.makeOffset(-maskShift->x(), -maskShift->y()));
    SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
    return OnFlushEntryRef::OnFlushRef(entry);
}
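
// Removes an entry from the cache: unregisters its path listener, releases any cached atlas, and
// takes it out of both the LRU list and the hash table.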
void GrCCPathCache::evict(const GrCCPathCache::Key& key, GrCCPathCacheEntry* entry) {
    if (!entry) {
        HashNode* node = fHashTable.find(key);
        SkASSERT(node);
        entry = node->entry();
    }
    SkASSERT(*entry->fCacheKey == key);
    SkASSERT(!entry->hasBeenEvicted());
    entry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
    entry->releaseCachedAtlas(this);
    fLRU.remove(entry);
    fHashTable.remove(key);
}

void GrCCPathCache::doPreFlushProcessing() {
    this->evictInvalidatedCacheKeys();

    // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
    fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
}
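
// Evicts every entry whose timestamp is older than 'purgeTime', then purges any atlas textures
// those evictions invalidated from the GrResourceCache.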
void GrCCPathCache::purgeEntriesOlderThan(GrProxyProvider* proxyProvider,
                                          const GrStdSteadyClock::time_point& purgeTime) {
    this->evictInvalidatedCacheKeys();

#ifdef SK_DEBUG
    auto lastTimestamp = (fLRU.isEmpty())
            ? GrStdSteadyClock::time_point::max()
            : fLRU.tail()->fTimestamp;
#endif

    // Evict every entry from our local path cache whose timestamp is older than purgeTime.
    while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
#ifdef SK_DEBUG
        // Verify that fLRU is sorted by timestamp.
        auto timestamp = fLRU.tail()->fTimestamp;
        SkASSERT(timestamp >= lastTimestamp);
        lastTimestamp = timestamp;
#endif
        this->evict(*fLRU.tail()->fCacheKey);
    }

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    this->purgeInvalidatedAtlasTextures(proxyProvider);
}
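
// The next two overloads drain the lists of invalidated atlas proxies and unique keys that have
// accumulated since the last purge: one removes the keys via the on-flush resource provider, the
// other via the proxy provider.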
void GrCCPathCache::purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider* onFlushRP) {
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        onFlushRP->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        onFlushRP->processInvalidUniqueKey(key);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrProxyProvider* proxyProvider) {
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        proxyProvider->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        proxyProvider->processInvalidUniqueKey(key, nullptr,
                                               GrProxyProvider::InvalidateGPUResource::kYes);
    }
    fInvalidatedProxyUniqueKeys.reset();
}
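
// Drains the invalidated-keys inbox and evicts any of those keys that are still in the cache.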
void GrCCPathCache::evictInvalidatedCacheKeys() {
    SkTArray<sk_sp<Key>> invalidatedKeys;
    fInvalidatedKeysInbox.poll(&invalidatedKeys);
    for (const sk_sp<Key>& key : invalidatedKeys) {
        bool isInCache = !key->shouldUnregisterFromPath();  // Gets set upon exiting the cache.
        if (isInCache) {
            this->evict(*key);
        }
    }
}
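
// Takes both a regular ref and an on-flush ref on the entry (and on its cached atlas, if any).
// The on-flush refs are dropped again when the OnFlushEntryRef is destroyed.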
GrCCPathCache::OnFlushEntryRef
GrCCPathCache::OnFlushEntryRef::OnFlushRef(GrCCPathCacheEntry* entry) {
    entry->ref();
    ++entry->fOnFlushRefCnt;
    if (entry->fCachedAtlas) {
        entry->fCachedAtlas->incrOnFlushRefCnt();
    }
    return OnFlushEntryRef(entry);
}

GrCCPathCache::OnFlushEntryRef::~OnFlushEntryRef() {
    if (!fEntry) {
        return;
    }
    --fEntry->fOnFlushRefCnt;
    SkASSERT(fEntry->fOnFlushRefCnt >= 0);
    if (fEntry->fCachedAtlas) {
        fEntry->fCachedAtlas->decrOnFlushRefCnt();
    }
    fEntry->unref();
}
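
// Records the coverage-count atlas that now holds this entry's mask, translating the mask bounds
// and atlas offset into the entry's cache-relative coordinate space via 'maskShift'.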
void GrCCPathCacheEntry::setCoverageCountAtlas(
        GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas, const SkIVector& atlasOffset,
        const GrOctoBounds& octoBounds, const SkIRect& devIBounds, const SkIVector& maskShift) {
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(!fCachedAtlas);  // Otherwise we would need to call releaseCachedAtlas().

    if (this->hasBeenEvicted()) {
        // This entry will never be found in the path cache again. Don't bother trying to save an
        // atlas texture for it in the GrResourceCache.
        return;
    }

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(devIBounds.height() * devIBounds.width());

    fAtlasOffset = atlasOffset + maskShift;
    fOctoBounds.setOffset(octoBounds, -maskShift.fX, -maskShift.fY);
    fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
}
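
// Swaps this entry's cached coverage-count atlas for a literal-coverage (A8) atlas, releasing the
// old atlas and reporting whether that release invalidated it from the resource cache.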
GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::upgradeToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas,
        const SkIVector& newAtlasOffset) {
    SkASSERT(!this->hasBeenEvicted());
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(fCachedAtlas);
    SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage != fCachedAtlas->coverageType());

    ReleaseAtlasResult releaseAtlasResult = this->releaseCachedAtlas(pathCache);

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(this->height() * this->width());

    fAtlasOffset = newAtlasOffset;
    return releaseAtlasResult;
}
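
// Detaches this entry from its cached atlas: reports the entry's pixels as invalidated, drops any
// outstanding on-flush refs, and returns whether the atlas texture ended up being invalidated
// from the resource cache.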
GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::releaseCachedAtlas(
        GrCCPathCache* pathCache) {
    ReleaseAtlasResult result = ReleaseAtlasResult::kNone;
    if (fCachedAtlas) {
        result = fCachedAtlas->invalidatePathPixels(pathCache, this->height() * this->width());
        if (fOnFlushRefCnt) {
            SkASSERT(fOnFlushRefCnt > 0);
            fCachedAtlas->decrOnFlushRefCnt(fOnFlushRefCnt);
        }
        fCachedAtlas = nullptr;
    }
    return result;
}
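
// Marks 'numPixels' of this cached atlas as no longer in use. Once at least half of the atlas's
// path pixels have been invalidated, its backing texture is queued for removal from the
// GrResourceCache.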
GrCCPathCacheEntry::ReleaseAtlasResult GrCCCachedAtlas::invalidatePathPixels(
        GrCCPathCache* pathCache, int numPixels) {
    // Mark the pixels invalid in the cached atlas texture.
    fNumInvalidatedPathPixels += numPixels;
    SkASSERT(fNumInvalidatedPathPixels <= fNumPathPixels);
    if (!fIsInvalidatedFromResourceCache && fNumInvalidatedPathPixels >= fNumPathPixels / 2) {
        // Too many invalidated pixels: purge the atlas texture from the resource cache.
        if (fOnFlushProxy) {
            // Don't clear (or std::move) fOnFlushProxy. Other path cache entries might still have
            // a reference on this atlas and expect to use our proxy during the current flush.
            // fOnFlushProxy will be cleared once fOnFlushRefCnt decrements to zero.
            pathCache->fInvalidatedProxies.push_back(fOnFlushProxy);
        } else {
            pathCache->fInvalidatedProxyUniqueKeys.push_back(fTextureKey);
        }
        fIsInvalidatedFromResourceCache = true;
        return ReleaseAtlasResult::kDidInvalidateFromCache;
    }
    return ReleaseAtlasResult::kNone;
}
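
// Drops 'count' on-flush refs. Once the ref count reaches zero, the proxy is released so it does
// not outlive the current flush.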
void GrCCCachedAtlas::decrOnFlushRefCnt(int count) const {
    SkASSERT(count > 0);
    fOnFlushRefCnt -= count;
    SkASSERT(fOnFlushRefCnt >= 0);
    if (0 == fOnFlushRefCnt) {
        // Don't hold the actual proxy past the end of the current flush.
        SkASSERT(fOnFlushProxy);
        fOnFlushProxy = nullptr;
    }
}