/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawingManager.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrTexture.h"
#include "include/private/GrRecordingContext.h"
#include "include/private/SkDeferredDisplayList.h"
#include "src/core/SkTTopoSort.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrOpList.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSoftwarePathRenderer.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTextureContext.h"
#include "src/gpu/GrTextureOpList.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/text/GrTextContext.h"
#include "src/image/SkSurface_Gpu.h"
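
// OpListDAG holds the drawing manager's collection of GrOpLists. When sorting is enabled,
// prepForFlush() topologically sorts the lists so that opLists execute after the opLists
// whose output they depend on.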
GrDrawingManager::OpListDAG::OpListDAG(bool sortOpLists) : fSortOpLists(sortOpLists) {}

GrDrawingManager::OpListDAG::~OpListDAG() {}

void GrDrawingManager::OpListDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
    idArray->reset(fOpLists.count());
    for (int i = 0; i < fOpLists.count(); ++i) {
        if (fOpLists[i]) {
            (*idArray)[i] = fOpLists[i]->uniqueID();
        }
    }
}

void GrDrawingManager::OpListDAG::reset() {
    fOpLists.reset();
}

void GrDrawingManager::OpListDAG::removeOpList(int index) {
    if (!fOpLists[index]->unique()) {
        // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111
        fOpLists[index]->endFlush();
    }

    fOpLists[index] = nullptr;
}

void GrDrawingManager::OpListDAG::removeOpLists(int startIndex, int stopIndex) {
    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fOpLists[i]) {
            continue;
        }
        this->removeOpList(i);
    }
}

bool GrDrawingManager::OpListDAG::isUsed(GrSurfaceProxy* proxy) const {
    for (int i = 0; i < fOpLists.count(); ++i) {
        if (fOpLists[i] && fOpLists[i]->isUsed(proxy)) {
            return true;
        }
    }

    return false;
}

void GrDrawingManager::OpListDAG::add(sk_sp<GrOpList> opList) {
    fOpLists.emplace_back(std::move(opList));
}

void GrDrawingManager::OpListDAG::add(const SkTArray<sk_sp<GrOpList>>& opLists) {
    fOpLists.push_back_n(opLists.count(), opLists.begin());
}

void GrDrawingManager::OpListDAG::swap(SkTArray<sk_sp<GrOpList>>* opLists) {
    SkASSERT(opLists->empty());
    opLists->swap(fOpLists);
}

void GrDrawingManager::OpListDAG::prepForFlush() {
    if (fSortOpLists) {
        SkDEBUGCODE(bool result =) SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
        SkASSERT(result);
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opLists. If two sequential opLists
    // share the same backing GrSurfaceProxy it means the opList was artificially split.
    if (fOpLists.count()) {
        GrRenderTargetOpList* prevOpList = fOpLists[0]->asRenderTargetOpList();
        for (int i = 1; i < fOpLists.count(); ++i) {
            GrRenderTargetOpList* curOpList = fOpLists[i]->asRenderTargetOpList();

            if (prevOpList && curOpList) {
                SkASSERT(prevOpList->fTarget.get() != curOpList->fTarget.get());
            }

            prevOpList = curOpList;
        }
    }
#endif
}

void GrDrawingManager::OpListDAG::closeAll(const GrCaps* caps) {
    for (int i = 0; i < fOpLists.count(); ++i) {
        if (fOpLists[i]) {
            fOpLists[i]->makeClosed(*caps);
        }
    }
}

void GrDrawingManager::OpListDAG::cleanup(const GrCaps* caps) {
    for (int i = 0; i < fOpLists.count(); ++i) {
        if (!fOpLists[i]) {
            continue;
        }

        // no opList should receive a new command after this
        fOpLists[i]->makeClosed(*caps);

        // We shouldn't need to do this, but it turns out some clients still hold onto opLists
        // after a cleanup.
        // MDB TODO: is this still true?
        if (!fOpLists[i]->unique()) {
            // TODO: Eventually this should be guaranteed unique.
            // https://bugs.chromium.org/p/skia/issues/detail?id=7111
            fOpLists[i]->endFlush();
        }
    }

    fOpLists.reset();
}

///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
                                   const GrPathRendererChain::Options& optionsForPathRendererChain,
                                   const GrTextContext::Options& optionsForTextContext,
                                   bool sortOpLists,
                                   bool reduceOpListSplitting)
        : fContext(context)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fOptionsForTextContext(optionsForTextContext)
        , fDAG(sortOpLists)
        , fTextContext(nullptr)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fFlushing(false)
        , fReduceOpListSplitting(reduceOpListSplitting) {
}

void GrDrawingManager::cleanup() {
    fDAG.cleanup(fContext->priv().caps());

    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;

    fOnFlushCBObjects.reset();
}

GrDrawingManager::~GrDrawingManager() {
    this->cleanup();
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->priv().abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}
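
// flush() closes all open opLists, lets the onFlush callback objects (e.g. atlas managers) add
// their own work, assigns GPU resources via GrResourceAllocator, executes the opLists in order,
// and finally hands the results to GrGpu::finishFlush.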
// MDB TODO: make use of the 'proxy' parameter.
GrSemaphoresSubmitted GrDrawingManager::flush(GrSurfaceProxy* proxies[], int numProxies,
                                              SkSurface::BackendSurfaceAccess access,
                                              const GrFlushInfo& info,
                                              const GrPrepareForExternalIORequests& externalRequests) {
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);

    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    SkDEBUGCODE(this->validate());

    if (kNone_GrFlushFlags == info.fFlags && !info.fNumSemaphores && !info.fFinishedProc &&
        !externalRequests.hasRequests()) {
        bool canSkip = numProxies > 0;
        for (int i = 0; i < numProxies && canSkip; ++i) {
            canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]);
        }
        if (canSkip) {
            return GrSemaphoresSubmitted::kNo;
        }
    }

    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    GrGpu* gpu = direct->priv().getGpu();
    if (!gpu) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    fFlushing = true;

    auto resourceProvider = direct->priv().resourceProvider();
    auto resourceCache = direct->priv().getResourceCache();

    // Semi-usually the GrOpLists are already closed at this point, but sometimes Ganesh
    // needs to flush mid-draw. In that case, the SkGpuDevice's GrOpLists won't be closed
    // but need to be flushed anyway. Closing such GrOpLists here will mean new
    // GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpList = nullptr;

    fDAG.prepForFlush();

    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);
    // TODO: AFAICT the only reason fFlushState is on GrDrawingManager rather than on the
    // stack here is to preserve the flush tokens.

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fDAG.gatherIDs(&fFlushingOpListIDs);

        SkSTArray<4, sk_sp<GrRenderTargetContext>> renderTargetContexts;
        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider,
                                      fFlushingOpListIDs.begin(), fFlushingOpListIDs.count(),
                                      &renderTargetContexts);
            for (const sk_sp<GrRenderTargetContext>& rtc : renderTargetContexts) {
                sk_sp<GrRenderTargetOpList> onFlushOpList = sk_ref_sp(rtc->getRTOpList());
                if (!onFlushOpList) {
                    continue;   // Odd - but not a big deal
                }
#ifdef SK_DEBUG
                // OnFlush callbacks are already invoked during flush, and are therefore expected
                // to handle resource allocation & usage on their own. (No deferred or lazy
                // proxies!)
                onFlushOpList->visitProxies_debugOnly([](GrSurfaceProxy* p, GrMipMapped) {
                    SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                    SkASSERT(GrSurfaceProxy::LazyState::kNot == p->lazyInstantiationState());
                });
#endif
                onFlushOpList->makeClosed(*fContext->priv().caps());
                onFlushOpList->prepare(&flushState);
                fOnFlushCBOpLists.push_back(std::move(onFlushOpList));
            }
            renderTargetContexts.reset();
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    for (int i = 0; i < fOpLists.count(); ++i) {
        SkDEBUGCODE(fOpLists[i]->dump();)
    }
#endif

    int startIndex, stopIndex;
    bool flushed = false;

    {
        GrResourceAllocator alloc(resourceProvider, flushState.deinstantiateProxyTracker()
                                  SkDEBUGCODE(, fDAG.numOpLists()));
        for (int i = 0; i < fDAG.numOpLists(); ++i) {
            if (fDAG.opList(i)) {
                fDAG.opList(i)->gatherProxyIntervals(&alloc);
            }
            alloc.markEndOfOpList(i);
        }
        alloc.determineRecyclability();

        GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
        int numOpListsExecuted = 0;
        while (alloc.assign(&startIndex, &stopIndex, &error)) {
            if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                for (int i = startIndex; i < stopIndex; ++i) {
                    if (fDAG.opList(i) && !fDAG.opList(i)->isInstantiated()) {
                        // If the backing surface wasn't allocated, drop the entire opList.
                        fDAG.removeOpList(i);
                    }
                    if (fDAG.opList(i)) {
                        fDAG.opList(i)->purgeOpsWithUninstantiatedProxies();
                    }
                }
            }

            if (this->executeOpLists(startIndex, stopIndex, &flushState, &numOpListsExecuted)) {
                flushed = true;
            }
        }
    }

#ifdef SK_DEBUG
    for (int i = 0; i < fDAG.numOpLists(); ++i) {
        // If there are any remaining opLists at this point, make sure they will not survive the
        // flush. Otherwise we need to call endFlush() on them.
        // http://skbug.com/7111
        SkASSERT(!fDAG.opList(i) || fDAG.opList(i)->unique());
    }
#endif
    fDAG.reset();
    this->clearDDLTargets();

#ifdef SK_DEBUG
    // In non-DDL mode this checks that all the flushed ops have been freed from the memory pool.
    // When we move to partial flushes this assert will no longer be valid.
    // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opLists
    // will be stored in the DDL's GrOpMemoryPools.
    GrOpMemoryPool* opMemoryPool = fContext->priv().opMemoryPool();
    SkASSERT(opMemoryPool->isEmpty());
#endif

    GrSemaphoresSubmitted result = gpu->finishFlush(proxies, numProxies, access, info,
                                                    externalRequests);

    flushState.deinstantiateProxyTracker()->deinstantiateAllProxies();

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (flushed) {
        resourceCache->purgeAsNeeded();
        flushed = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingOpListIDs.begin(),
                                   fFlushingOpListIDs.count());
        flushed = true;
    }
    if (flushed) {
        resourceCache->purgeAsNeeded();
    }

    fFlushingOpListIDs.reset();
    fFlushing = false;

    return result;
}
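
// Prepares and executes the opLists in [startIndex, stopIndex), running the onFlush opLists
// first. To limit memory use (notably on Vulkan), an intermediate finishFlush is issued after
// every kMaxOpListsBeforeFlush opLists.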
bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushState* flushState,
                                      int* numOpListsExecuted) {
    SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numOpLists());

#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing opLists: %d to %d out of [%d, %d]\n",
             startIndex, stopIndex, 0, fDAG.numOpLists());
    for (int i = startIndex; i < stopIndex; ++i) {
        if (fDAG.opList(i)) {
            fDAG.opList(i)->dump(true);
        }
    }
#endif

    bool anyOpListsExecuted = false;

    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fDAG.opList(i)) {
            continue;
        }

        GrOpList* opList = fDAG.opList(i);
        SkASSERT(opList->isInstantiated());
        SkASSERT(opList->deferredProxiesAreInstantiated());

        opList->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of
    // resources for each command buffer associated with the oplists. If this gets too large we
    // can cause the devices to go OOM. In practice we usually only hit this case in our tests,
    // but to be safe we put a cap on the number of oplists we will execute before flushing to
    // the GPU to relieve some memory pressure.
    static constexpr int kMaxOpListsBeforeFlush = 100;

    // Execute the onFlush op lists first, if any.
    for (sk_sp<GrOpList>& onFlushOpList : fOnFlushCBOpLists) {
        if (!onFlushOpList->execute(flushState)) {
            SkDebugf("WARNING: onFlushOpList failed to execute.\n");
        }
        SkASSERT(onFlushOpList->unique());
        onFlushOpList = nullptr;
        (*numOpListsExecuted)++;
        if (*numOpListsExecuted >= kMaxOpListsBeforeFlush) {
            flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
                                           GrFlushInfo(), GrPrepareForExternalIORequests());
            *numOpListsExecuted = 0;
        }
    }
    fOnFlushCBOpLists.reset();

    // Execute the normal op lists.
    for (int i = startIndex; i < stopIndex; ++i) {
        if (!fDAG.opList(i)) {
            continue;
        }

        if (fDAG.opList(i)->execute(flushState)) {
            anyOpListsExecuted = true;
        }
        (*numOpListsExecuted)++;
        if (*numOpListsExecuted >= kMaxOpListsBeforeFlush) {
            flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
                                           GrFlushInfo(), GrPrepareForExternalIORequests());
            *numOpListsExecuted = 0;
        }
    }

    SkASSERT(!flushState->commandBuffer());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the OpLists so that the last resources to be freed are those
    // that are written to in the OpLists. This helps to make sure the most recently used resources
    // are the last to be purged by the resource cache.
    flushState->reset();

    fDAG.removeOpLists(startIndex, stopIndex);

    return anyOpListsExecuted;
}
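
// Flushes the DAG on behalf of 'proxies', then resolves any MSAA render targets and regenerates
// dirty mip levels for the proxies' backing surfaces.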
GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(GrSurfaceProxy* proxies[], int numProxies,
                                                      SkSurface::BackendSurfaceAccess access,
                                                      const GrFlushInfo& info) {
    if (this->wasAbandoned()) {
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);

    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    GrGpu* gpu = direct->priv().getGpu();
    if (!gpu) {
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    GrSemaphoresSubmitted result = this->flush(proxies, numProxies, access, info,
                                               GrPrepareForExternalIORequests());
    for (int i = 0; i < numProxies; ++i) {
        if (!proxies[i]->isInstantiated()) {
            return result;
        }
    }

    for (int i = 0; i < numProxies; ++i) {
        GrSurface* surface = proxies[i]->peekSurface();
        if (auto* rt = surface->asRenderTarget()) {
            gpu->resolveRenderTarget(rt);
        }
        if (auto* tex = surface->asTexture()) {
            if (tex->texturePriv().mipMapped() == GrMipMapped::kYes &&
                tex->texturePriv().mipMapsAreDirty()) {
                gpu->regenerateMipMapLevels(tex);
            }
        }
    }

    SkDEBUGCODE(this->validate());
    return result;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if GR_TEST_UTILS
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.count());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif
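
// DDL support: moveOpListsToDDL() transfers the recorded opLists (and any pending CCPR paths)
// into an SkDeferredDisplayList; copyOpListsFromDDL() replays them against 'newDest'.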
void GrDrawingManager::moveOpListsToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no opList should receive a new command after this
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpList = nullptr;

    fDAG.swap(&ddl->fOpLists);

    if (fPathRendererChain) {
        if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
            ddl->fPendingPaths = ccpr->detachPendingPaths();
        }
    }

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::copyOpListsFromDDL(const SkDeferredDisplayList* ddl,
                                          GrRenderTargetProxy* newDest) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpList) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opList world) would've just glommed onto the
        // end of the single opList but referred to a far earlier RT need to appear in their
        // own opList.
        fActiveOpList->makeClosed(*fContext->priv().caps());
        fActiveOpList = nullptr;
    }

    this->addDDLTarget(newDest);

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the copied opLists) will steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest;

    if (ddl->fPendingPaths.size()) {
        GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer();

        ccpr->mergePendingPaths(ddl->fPendingPaths);
    }

    fDAG.add(ddl->fOpLists);

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fDAG.sortingOpLists() && fReduceOpListSplitting) {
        SkASSERT(!fActiveOpList);
    } else {
        if (fActiveOpList) {
            SkASSERT(!fDAG.empty());
            SkASSERT(!fActiveOpList->isClosed());
            SkASSERT(fActiveOpList == fDAG.back());
        }

        for (int i = 0; i < fDAG.numOpLists(); ++i) {
            if (fActiveOpList != fDAG.opList(i)) {
                SkASSERT(fDAG.opList(i)->isClosed());
            }
        }

        if (!fDAG.empty() && !fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpList == fDAG.back());
        }
    }
}
#endif
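
// Creates the opList that will receive subsequent draws targeting 'rtp'. Depending on the
// sorting / splitting-reduction flags, this either closes only the opLists that depend on the
// proxy's current contents or closes the single active opList.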
sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(sk_sp<GrRenderTargetProxy> rtp,
                                                          bool managedOpList) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (fDAG.sortingOpLists() && fReduceOpListSplitting) {
        // In this case we need to close all the opLists that rely on the current contents of
        // 'rtp'. That is bc we're going to update the content of the proxy so they need to be
        // split in case they use both the old and new content. (This is a bit of an overkill:
        // they really only need to be split if they ever reference proxy's contents again but
        // that is hard to predict/handle).
        if (GrOpList* lastOpList = rtp->getLastOpList()) {
            lastOpList->closeThoseWhoDependOnMe(*fContext->priv().caps());
        }
    } else if (fActiveOpList) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opList world) would've just glommed onto the
        // end of the single opList but referred to a far earlier RT need to appear in their
        // own opList.
        fActiveOpList->makeClosed(*fContext->priv().caps());
        fActiveOpList = nullptr;
    }

    sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(
                                                        fContext->priv().refOpMemoryPool(),
                                                        rtp,
                                                        fContext->priv().auditTrail()));
    SkASSERT(rtp->getLastOpList() == opList.get());

    if (managedOpList) {
        fDAG.add(opList);

        if (!fDAG.sortingOpLists() || !fReduceOpListSplitting) {
            fActiveOpList = opList.get();
        }
    }

    SkDEBUGCODE(this->validate());
    return opList;
}

sk_sp<GrTextureOpList> GrDrawingManager::newTextureOpList(sk_sp<GrTextureProxy> textureProxy) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    if (fDAG.sortingOpLists() && fReduceOpListSplitting) {
        // In this case we need to close all the opLists that rely on the current contents of
        // 'texture'. That is bc we're going to update the content of the proxy so they need to
        // be split in case they use both the old and new content. (This is a bit of an
        // overkill: they really only need to be split if they ever reference proxy's contents
        // again but that is hard to predict/handle).
        if (GrOpList* lastOpList = textureProxy->getLastOpList()) {
            lastOpList->closeThoseWhoDependOnMe(*fContext->priv().caps());
        }
    } else if (fActiveOpList) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opList world) would've just glommed onto the
        // end of the single opList but referred to a far earlier RT need to appear in their
        // own opList.
        fActiveOpList->makeClosed(*fContext->priv().caps());
        fActiveOpList = nullptr;
    }

    sk_sp<GrTextureOpList> opList(new GrTextureOpList(fContext->priv().refOpMemoryPool(),
                                                      textureProxy,
                                                      fContext->priv().auditTrail()));

    SkASSERT(textureProxy->getLastOpList() == opList.get());

    fDAG.add(opList);
    if (!fDAG.sortingOpLists() || !fReduceOpListSplitting) {
        fActiveOpList = opList.get();
    }

    SkDEBUGCODE(this->validate());
    return opList;
}

GrTextContext* GrDrawingManager::getTextContext() {
    if (!fTextContext) {
        fTextContext = GrTextContext::Make(fOptionsForTextContext);
    }

    return fTextContext.get();
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {
    if (!fPathRendererChain) {
        fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (GrPathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

    return pr;
}

GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(
                new GrSoftwarePathRenderer(fContext->priv().proxyProvider(),
                                           fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain));
    }
    return fPathRendererChain->getCoverageCountingPathRenderer();
}

void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->priv().asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
                    GrPrepareForExternalIORequests());
        resourceCache->purgeAsNeeded();
    }
}

sk_sp<GrRenderTargetContext> GrDrawingManager::makeRenderTargetContext(
        sk_sp<GrSurfaceProxy> sProxy,
        GrColorType colorType,
        sk_sp<SkColorSpace> colorSpace,
        const SkSurfaceProps* surfaceProps,
        bool managedOpList) {
    if (this->wasAbandoned() || !sProxy->asRenderTargetProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage.
    if (!SkSurface_Gpu::Valid(fContext->priv().caps(), sProxy->backendFormat())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    sk_sp<GrRenderTargetProxy> renderTargetProxy(sk_ref_sp(sProxy->asRenderTargetProxy()));

    return sk_sp<GrRenderTargetContext>(new GrRenderTargetContext(fContext,
                                                                  std::move(renderTargetProxy),
                                                                  colorType,
                                                                  std::move(colorSpace),
                                                                  surfaceProps,
                                                                  managedOpList));
}

sk_sp<GrTextureContext> GrDrawingManager::makeTextureContext(sk_sp<GrSurfaceProxy> sProxy,
                                                             GrColorType colorType,
                                                             SkAlphaType alphaType,
                                                             sk_sp<SkColorSpace> colorSpace) {
    if (this->wasAbandoned() || !sProxy->asTextureProxy()) {
        return nullptr;
    }

    // SkSurface catches bad color space usage at creation. This check handles anything that slips
    // by, including internal usage.
    if (!SkSurface_Gpu::Valid(fContext->priv().caps(), sProxy->backendFormat())) {
        SkDEBUGFAIL("Invalid config and colorspace combination");
        return nullptr;
    }

    // GrTextureRenderTargets should always be using a GrRenderTargetContext
    SkASSERT(!sProxy->asRenderTargetProxy());

    sk_sp<GrTextureProxy> textureProxy(sk_ref_sp(sProxy->asTextureProxy()));

    return sk_sp<GrTextureContext>(new GrTextureContext(fContext,
                                                        std::move(textureProxy),
                                                        colorType,
                                                        alphaType,
                                                        std::move(colorSpace)));
}