/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceAllocator.h"

#include "src/gpu/GrDeinstantiateProxyTracker.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrOpList.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfacePriv.h"
#include "src/gpu/GrSurfaceProxy.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTextureProxy.h"

#if GR_TRACK_INTERVAL_CREATION
#include <atomic>

uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID++;
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif
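
// Bind this interval's proxy to 's'. The interval keeps its own ref on the surface so that,
// when the interval expires, the surface can be detached and recycled.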
void GrResourceAllocator::Interval::assign(sk_sp<GrSurface> s) {
    SkASSERT(!fAssignedSurface);
    fAssignedSurface = s;
    fProxy->priv().assign(std::move(s));
}
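
// Mark as recyclable any interval whose proxy's refs are all accounted for by recorded uses;
// the backing surface of such a proxy can safely be returned to the free pool when the
// interval expires, since no code outside of Ganesh can still be holding onto it.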
void GrResourceAllocator::determineRecyclability() {
    for (Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        if (cur->proxy()->canSkipResourceAllocator()) {
            // These types of proxies can slip in here if they require a stencil buffer
            continue;
        }

        if (cur->uses() >= cur->proxy()->priv().getProxyRefCnt()) {
            // All the refs on the proxy are known to the resource allocator thus no one
            // should be holding onto it outside of Ganesh.
            SkASSERT(cur->uses() == cur->proxy()->priv().getProxyRefCnt());
            cur->markAsRecyclable();
        }
    }
}
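
// Record where one opList ends and the next begins: fEndOfOpListOpIndices[i] holds the first
// op index belonging to opList i+1. assign() and onOpListBoundary() use these indices to
// detect opList boundaries while walking the sorted intervals.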
void GrResourceAllocator::markEndOfOpList(int opListIndex) {
    SkASSERT(!fAssigned);  // We shouldn't be adding any opLists after (or during) assignment

    SkASSERT(fEndOfOpListOpIndices.count() == opListIndex);
    if (!fEndOfOpListOpIndices.empty()) {
        SkASSERT(fEndOfOpListOpIndices.back() < this->curOp());
    }

    fEndOfOpListOpIndices.push_back(this->curOp()); // This is the first op index of the next opList
    SkASSERT(fEndOfOpListOpIndices.count() <= fNumOpLists);
}

GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}
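
// Add a usage interval [start, end] (in op indices) for 'proxy'. If the proxy already has an
// interval it is extended; otherwise a new Interval is created, reusing one from the free
// interval list when possible. Proxies that can skip the allocator, and read-only proxies,
// never receive an interval.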
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    if (proxy->canSkipResourceAllocator()) {
        // If the proxy is still not instantiated at this point but will need stencil, it will
        // attach its own stencil buffer upon onFlush instantiation.
        if (proxy->isInstantiated()) {
            int minStencilSampleCount = (proxy->asRenderTargetProxy())
                    ? proxy->asRenderTargetProxy()->numStencilSamples()
                    : 0;
            if (minStencilSampleCount) {
                if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(
                        fResourceProvider, proxy->peekSurface(), minStencilSampleCount)) {
                    SkDebugf("WARNING: failed to attach stencil buffer. "
                             "Rendering may be incorrect.\n");
                }
            }
        }
        return;
    }

    SkASSERT(!proxy->priv().ignoredByResourceAllocator());

    SkASSERT(start <= end);
    SkASSERT(!fAssigned);  // We shouldn't be adding any intervals after (or during) assignment

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        // Since we aren't going to add an interval we won't revisit this proxy in assign(). So it
        // must already be instantiated or it must be a lazy proxy that we will instantiate below.
        SkASSERT(proxy->isInstantiated() ||
                 GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState());
    } else {
        if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
            // Revise the interval for an existing use
#ifdef SK_DEBUG
            if (0 == start && 0 == end) {
                // This interval is for the initial upload to a deferred proxy. Due to the vagaries
                // of how deferred proxies are collected they can appear as uploads multiple times
                // in a single opList's list and as uploads in several opLists.
                SkASSERT(0 == intvl->start());
            } else if (isDirectDstRead) {
                // Direct reads from the render target itself should occur w/in the existing
                // interval
                SkASSERT(intvl->start() <= start && intvl->end() >= end);
            } else {
                SkASSERT(intvl->end() <= start && intvl->end() <= end);
            }
#endif
            if (ActualUse::kYes == actualUse) {
                intvl->addUse();
            }
            intvl->extendEnd(end);
            return;
        }
        Interval* newIntvl;
        if (fFreeIntervalList) {
            newIntvl = fFreeIntervalList;
            fFreeIntervalList = newIntvl->next();
            newIntvl->setNext(nullptr);
            newIntvl->resetTo(proxy, start, end);
        } else {
            newIntvl = fIntervalAllocator.make<Interval>(proxy, start, end);
        }

        if (ActualUse::kYes == actualUse) {
            newIntvl->addUse();
        }
        fIntvlList.insertByIncreasingStart(newIntvl);
        fIntvlHash.add(newIntvl);
    }

    // Because readOnly proxies do not get a usage interval we must instantiate them here (since it
    // won't occur in GrResourceAllocator::assign)
    if (proxy->readOnly()) {
        // FIXME: remove this once we can do the lazy instantiation from assign instead.
        if (GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState()) {
            if (proxy->priv().doLazyInstantiation(fResourceProvider)) {
                if (proxy->priv().lazyInstantiationType() ==
                    GrSurfaceProxy::LazyInstantiationType::kDeinstantiate) {
                    fDeinstantiateTracker->addProxy(proxy);
                }
            } else {
                fLazyInstantiationError = true;
            }
        }
    }
}
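
// Remove and return the head of the list, or null if the list is empty.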
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}
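
// Note: the percentage comments in the two insert methods below appear to record how often
// each branch is taken in practice, presumably from profiling; they motivate checking the
// head and tail before falling back to the linear walk.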
// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif
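
// Detach the entire list, returning its former head and leaving the list empty. Nothing is
// freed here, so the caller presumably walks the returned chain itself.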
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::detachAll() {
    Interval* tmp = fHead;
    fHead = nullptr;
    fTail = nullptr;
    return tmp;
}

// 'surface' can be reused. Add it back to the free pool.
void GrResourceAllocator::recycleSurface(sk_sp<GrSurface> surface) {
    const GrScratchKey& key = surface->resourcePriv().getScratchKey();

    if (!key.isValid()) {
        return; // can't do it w/o a valid scratch key
    }

    if (surface->getUniqueKey().isValid()) {
        // If the surface has a unique key we throw it back into the resource cache.
        // If things get really tight 'findSurfaceFor' may pull it back out but there is
        // no need to have it in tight rotation.
        return;
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("putting surface %d back into pool\n", surface->uniqueID().asUInt());
#endif
    // TODO: fix this insertion so we get a more LRU-ish behavior
    fFreePool.insert(key, surface.release());
}

// First try to reattach a cached version if the proxy is uniquely keyed, then to reuse one of
// the recently allocated/used GrSurfaces in the free pool. If we can't find a usable one,
// create a new one.
sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(const GrSurfaceProxy* proxy,
                                                     int minStencilSampleCount) {
    if (proxy->asTextureProxy() && proxy->asTextureProxy()->getUniqueKey().isValid()) {
        // First try to reattach to a cached version if the proxy is uniquely keyed
        sk_sp<GrSurface> surface = fResourceProvider->findByUniqueKey<GrSurface>(
                proxy->asTextureProxy()->getUniqueKey());
        if (surface) {
            if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(),
                                                           minStencilSampleCount)) {
                return nullptr;
            }
            return surface;
        }
    }

    // Next look in the free pool
    GrScratchKey key;
    proxy->priv().computeScratchKey(&key);

    auto filter = [] (const GrSurface* s) {
        return true;
    };
    sk_sp<GrSurface> surface(fFreePool.findAndRemove(key, filter));
    if (surface) {
        if (SkBudgeted::kYes == proxy->isBudgeted() &&
            GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
            // This gets the job done but isn't quite correct. It would be better to try to
            // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
            surface->resourcePriv().makeBudgeted();
        }

        if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(),
                                                       minStencilSampleCount)) {
            return nullptr;
        }
        SkASSERT(!surface->getUniqueKey().isValid());
        return surface;
    }

    // Failing that, have the resource provider create a new one
    return proxy->priv().createSurface(fResourceProvider);
}

// Remove any intervals that end before the current index. Return their GrSurfaces
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* temp = fActiveIntvls.popHead();
        SkASSERT(!temp->next());

        if (temp->wasAssignedSurface()) {
            sk_sp<GrSurface> surface = temp->detachSurface();

            if (temp->isRecyclable()) {
                this->recycleSurface(std::move(surface));
            }
        }

        // Add temp to the free interval list so it can be reused
        SkASSERT(!temp->wasAssignedSurface()); // it had better not have a ref on a surface
        temp->setNext(fFreeIntervalList);
        fFreeIntervalList = temp;
    }
}
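
// Return true if the allocator's remaining work begins in a new opList, i.e. the head of
// the pending-interval list starts at or after the current opList's end index.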
bool GrResourceAllocator::onOpListBoundary() const {
    if (fIntvlList.empty()) {
        SkASSERT(fCurOpListIndex+1 <= fNumOpLists);
        // Although technically on an opList boundary there is no need to force an
        // intermediate flush here
        return false;
    }

    const Interval* tmp = fIntvlList.peekHead();
    return fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start();
}
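
// Cut the current assignment short at an opList boundary (by bumping '*stopIndex') so the
// caller can flush the opLists assigned so far and free up GPU resources.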
void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
    *stopIndex = fCurOpListIndex+1;

    // This is interrupting the allocation of resources for this flush. We need to
    // proactively clear the active interval list of any intervals that aren't
    // guaranteed to survive the partial flush lest they become zombies (i.e.,
    // holding a deleted surface proxy).
    const Interval* tmp = fIntvlList.peekHead();
    SkASSERT(fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start());

    fCurOpListIndex++;
    SkASSERT(fCurOpListIndex < fNumOpLists);

    this->expire(tmp->start());
}
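
// Assign surfaces to all pending intervals, in a manner reminiscent of linear-scan register
// allocation: intervals are pulled off the list in order of increasing start; expired
// intervals return their surfaces to the free pool; and each new interval tries the free
// pool before creating a new surface. '*startIndex' and '*stopIndex' report back the range
// of opLists whose resources are now assigned (the range can stop early if an intermediate
// flush is forced). Returns false if there is nothing to render; '*outError' reports any
// proxy instantiation failure.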
bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
    SkASSERT(outError);
    *outError = fLazyInstantiationError ? AssignError::kFailedProxyInstantiation
                                        : AssignError::kNoError;

    SkASSERT(fNumOpLists == fEndOfOpListOpIndices.count());

    fIntvlHash.reset(); // we don't need the interval hash anymore

    if (fCurOpListIndex >= fEndOfOpListOpIndices.count()) {
        return false; // nothing to render
    }

    *startIndex = fCurOpListIndex;
    *stopIndex = fEndOfOpListOpIndices.count();

    if (fIntvlList.empty()) {
        fCurOpListIndex = fEndOfOpListOpIndices.count();
        return true; // no resources to assign
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning opLists %d through %d out of %d numOpLists\n",
             *startIndex, *stopIndex, fNumOpLists);
    SkDebugf("EndOfOpListIndices: ");
    for (int i = 0; i < fEndOfOpListOpIndices.count(); ++i) {
        SkDebugf("%d ", fEndOfOpListOpIndices[i]);
    }
    SkDebugf("\n");
#endif

    SkDEBUGCODE(fAssigned = true;)

#if GR_ALLOCATION_SPEW
    this->dumpIntervals();
#endif
    while (Interval* cur = fIntvlList.popHead()) {
        if (fEndOfOpListOpIndices[fCurOpListIndex] <= cur->start()) {
            fCurOpListIndex++;
            SkASSERT(fCurOpListIndex < fNumOpLists);
        }

        this->expire(cur->start());

        int minStencilSampleCount = (cur->proxy()->asRenderTargetProxy())
                ? cur->proxy()->asRenderTargetProxy()->numStencilSamples()
                : 0;

        if (cur->proxy()->isInstantiated()) {
            if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(
                    fResourceProvider, cur->proxy()->peekSurface(), minStencilSampleCount)) {
                *outError = AssignError::kFailedProxyInstantiation;
            }

            fActiveIntvls.insertByIncreasingEnd(cur);

            if (fResourceProvider->overBudget()) {
                // Only force intermediate draws on opList boundaries
                if (this->onOpListBoundary()) {
                    this->forceIntermediateFlush(stopIndex);
                    return true;
                }
            }

            continue;
        }

        if (GrSurfaceProxy::LazyState::kNot != cur->proxy()->lazyInstantiationState()) {
            if (!cur->proxy()->priv().doLazyInstantiation(fResourceProvider)) {
                *outError = AssignError::kFailedProxyInstantiation;
            } else {
                if (GrSurfaceProxy::LazyInstantiationType::kDeinstantiate ==
                    cur->proxy()->priv().lazyInstantiationType()) {
                    fDeinstantiateTracker->addProxy(cur->proxy());
                }
            }
        } else if (sk_sp<GrSurface> surface = this->findSurfaceFor(cur->proxy(),
                                                                   minStencilSampleCount)) {
            // TODO: make getUniqueKey virtual on GrSurfaceProxy
            GrTextureProxy* texProxy = cur->proxy()->asTextureProxy();

            if (texProxy && texProxy->getUniqueKey().isValid()) {
                if (!surface->getUniqueKey().isValid()) {
                    fResourceProvider->assignUniqueKeyToResource(texProxy->getUniqueKey(),
                                                                 surface.get());
                }
                SkASSERT(surface->getUniqueKey() == texProxy->getUniqueKey());
            }

#if GR_ALLOCATION_SPEW
            SkDebugf("Assigning %d to %d\n",
                     surface->uniqueID().asUInt(),
                     cur->proxy()->uniqueID().asUInt());
#endif

            cur->assign(std::move(surface));
        } else {
            SkASSERT(!cur->proxy()->isInstantiated());
            *outError = AssignError::kFailedProxyInstantiation;
        }

        fActiveIntvls.insertByIncreasingEnd(cur);

        if (fResourceProvider->overBudget()) {
            // Only force intermediate draws on opList boundaries
            if (this->onOpListBoundary()) {
                this->forceIntermediateFlush(stopIndex);
                return true;
            }
        }
    }

    // Expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return true;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = SkTMin(min, cur->start());
        max = SkTMax(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif