/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/private/GrRecordingContext.h"
#include "src/core/SkExchange.h"
#include "src/core/SkRectPriv.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrGpuCommandBuffer.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrRenderTargetOpList.h"
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/geometry/GrRect.h"
#include "src/gpu/ops/GrClearOp.h"
#include "src/gpu/ops/GrCopySurfaceOp.h"

////////////////////////////////////////////////////////////////////////////////

// Experimentally we have found that most combining occurs within the first 10 comparisons.
static const int kMaxOpMergeDistance = 10;
static const int kMaxOpChainDistance = 10;

////////////////////////////////////////////////////////////////////////////////

using DstProxy = GrXferProcessor::DstProxy;

////////////////////////////////////////////////////////////////////////////////
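// Two ops can be reordered relative to one another only if their bounds do not overlap;
// this is what preserves painter's order while searching for merge candidates below.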
static inline bool can_reorder(const SkRect& a, const SkRect& b) { return !GrRectsOverlap(a, b); }

////////////////////////////////////////////////////////////////////////////////

inline GrRenderTargetOpList::OpChain::List::List(std::unique_ptr<GrOp> op)
        : fHead(std::move(op)), fTail(fHead.get()) {
    this->validate();
}

inline GrRenderTargetOpList::OpChain::List::List(List&& that) { *this = std::move(that); }

inline GrRenderTargetOpList::OpChain::List& GrRenderTargetOpList::OpChain::List::operator=(
        List&& that) {
    fHead = std::move(that.fHead);
    fTail = that.fTail;
    that.fTail = nullptr;
    this->validate();
    return *this;
}
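// Removes the op at the head of the list and returns ownership of it; clears fTail if the
// list becomes empty.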
inline std::unique_ptr<GrOp> GrRenderTargetOpList::OpChain::List::popHead() {
    SkASSERT(fHead);
    auto temp = fHead->cutChain();
    std::swap(temp, fHead);
    if (!fHead) {
        SkASSERT(fTail == temp.get());
        fTail = nullptr;
    }
    return temp;
}
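// Unlinks 'op' (which must be somewhere in this list) from the chain and returns ownership
// of it.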
inline std::unique_ptr<GrOp> GrRenderTargetOpList::OpChain::List::removeOp(GrOp* op) {
#ifdef SK_DEBUG
    auto head = op;
    while (head->prevInChain()) { head = head->prevInChain(); }
    SkASSERT(head == fHead.get());
#endif
    auto prev = op->prevInChain();
    if (!prev) {
        SkASSERT(op == fHead.get());
        return this->popHead();
    }
    auto temp = prev->cutChain();
    if (auto next = temp->cutChain()) {
        prev->chainConcat(std::move(next));
    } else {
        SkASSERT(fTail == op);
        fTail = prev;
    }
    this->validate();
    return temp;
}

inline void GrRenderTargetOpList::OpChain::List::pushHead(std::unique_ptr<GrOp> op) {
    SkASSERT(op);
    SkASSERT(op->isChainHead());
    SkASSERT(op->isChainTail());
    if (fHead) {
        op->chainConcat(std::move(fHead));
        fHead = std::move(op);
    } else {
        fHead = std::move(op);
        fTail = fHead.get();
    }
}

inline void GrRenderTargetOpList::OpChain::List::pushTail(std::unique_ptr<GrOp> op) {
    SkASSERT(op->isChainTail());
    fTail->chainConcat(std::move(op));
    fTail = fTail->nextInChain();
}

inline void GrRenderTargetOpList::OpChain::List::validate() const {
#ifdef SK_DEBUG
    if (fHead) {
        SkASSERT(fTail);
        fHead->validateChain(fTail);
    }
#endif
}

////////////////////////////////////////////////////////////////////////////////
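// An OpChain is a run of ops that share the same applied clip, dst proxy, and processor
// analysis, and whose ops may therefore be merged or chained with one another as they are
// recorded.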
GrRenderTargetOpList::OpChain::OpChain(std::unique_ptr<GrOp> op,
                                       GrProcessorSet::Analysis processorAnalysis,
                                       GrAppliedClip* appliedClip, const DstProxy* dstProxy)
        : fList{std::move(op)}
        , fProcessorAnalysis(processorAnalysis)
        , fAppliedClip(appliedClip) {
    if (fProcessorAnalysis.requiresDstTexture()) {
        SkASSERT(dstProxy && dstProxy->proxy());
        fDstProxy = *dstProxy;
    }
    fBounds = fList.head()->bounds();
}

void GrRenderTargetOpList::OpChain::visitProxies(const GrOp::VisitProxyFunc& func) const {
    if (fList.empty()) {
        return;
    }
    for (const auto& op : GrOp::ChainRange<>(fList.head())) {
        op.visitProxies(func);
    }
    if (fDstProxy.proxy()) {
        func(fDstProxy.proxy(), GrMipMapped::kNo);
    }
    if (fAppliedClip) {
        fAppliedClip->visitProxies(func);
    }
}

void GrRenderTargetOpList::OpChain::deleteOps(GrOpMemoryPool* pool) {
    while (!fList.empty()) {
        pool->release(fList.popHead());
    }
}

// Concatenates two op chains and attempts to merge ops across the chains. Assumes that we know
// that the two chains are chainable. Returns the new chain.
GrRenderTargetOpList::OpChain::List GrRenderTargetOpList::OpChain::DoConcat(
        List chainA, List chainB, const GrCaps& caps, GrOpMemoryPool* pool,
        GrAuditTrail* auditTrail) {
    // We process ops in chain b from head to tail. We attempt to merge with nodes in a, starting
    // at chain a's tail and working toward the head. We produce one of the following outcomes:
    // 1) b's head is merged into an op in a.
    // 2) An op from chain a is merged into b's head. (In this case b's head gets processed again.)
    // 3) b's head is popped from chain b and added at the tail of a.
    // After result 3 we don't want to attempt to merge the next head of b with the new tail of a,
    // as we assume merges were already attempted when chain b was created. So we keep track of the
    // original tail of a and start our iteration of a there. We also track the bounds of the nodes
    // appended to chain a that will be skipped for bounds testing. If the original tail of a is
    // merged into an op in b (case 2) then we advance the "original tail" towards the head of a.
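    // For example, with a = [A1, A2] and b = [B1, B2]: if B1's contents merge into A2 and a
    // backward merge is legal, B1 is simply released (case 1). If only a forward merge is legal,
    // A2 (now holding the combined work) is detached from a and becomes b's new head, which is
    // then processed against a again (case 2). If no merge happens, B1 is moved to a's tail
    // (case 3).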
    GrOp* origATail = chainA.tail();
    SkRect skipBounds = SkRectPriv::MakeLargestInverted();
    do {
        int numMergeChecks = 0;
        bool merged = false;
        bool noSkip = (origATail == chainA.tail());
        SkASSERT(noSkip == (skipBounds == SkRectPriv::MakeLargestInverted()));
        bool canBackwardMerge = noSkip || can_reorder(chainB.head()->bounds(), skipBounds);
        SkRect forwardMergeBounds = skipBounds;
        GrOp* a = origATail;
        while (a) {
            bool canForwardMerge =
                    (a == chainA.tail()) || can_reorder(a->bounds(), forwardMergeBounds);
            if (canForwardMerge || canBackwardMerge) {
                auto result = a->combineIfPossible(chainB.head(), caps);
                SkASSERT(result != GrOp::CombineResult::kCannotCombine);
                merged = (result == GrOp::CombineResult::kMerged);
                GrOP_INFO("\t\t: (%s opID: %u) -> Combining with (%s, opID: %u)\n",
                          chainB.head()->name(), chainB.head()->uniqueID(), a->name(),
                          a->uniqueID());
            }
            if (merged) {
                GR_AUDIT_TRAIL_OPS_RESULT_COMBINED(auditTrail, a, chainB.head());
                if (canBackwardMerge) {
                    pool->release(chainB.popHead());
                } else {
                    // We merged the contents of b's head into a. We will replace b's head with a
                    // in chain b.
                    SkASSERT(canForwardMerge);
                    if (a == origATail) {
                        origATail = a->prevInChain();
                    }
                    std::unique_ptr<GrOp> detachedA = chainA.removeOp(a);
                    pool->release(chainB.popHead());
                    chainB.pushHead(std::move(detachedA));
                    if (chainA.empty()) {
                        // We merged all the nodes in chain a to chain b.
                        return chainB;
                    }
                }
                break;
            } else {
                if (++numMergeChecks == kMaxOpMergeDistance) {
                    break;
                }
                forwardMergeBounds.joinNonEmptyArg(a->bounds());
                canBackwardMerge =
                        canBackwardMerge && can_reorder(chainB.head()->bounds(), a->bounds());
                a = a->prevInChain();
            }
        }
        // If we weren't able to merge b's head then pop b's head from chain b and make it the
        // new tail of a.
        if (!merged) {
            chainA.pushTail(chainB.popHead());
            skipBounds.joinNonEmptyArg(chainA.tail()->bounds());
        }
    } while (!chainB.empty());
    return chainA;
}
// Attempts to concatenate the given chain onto our own and merge ops across the chains. Returns
// whether the operation succeeded. On success, the provided list will be returned empty.
bool GrRenderTargetOpList::OpChain::tryConcat(
        List* list, GrProcessorSet::Analysis processorAnalysis, const DstProxy& dstProxy,
        const GrAppliedClip* appliedClip, const SkRect& bounds, const GrCaps& caps,
        GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
    SkASSERT(!fList.empty());
    SkASSERT(!list->empty());
    SkASSERT(fProcessorAnalysis.requiresDstTexture() == SkToBool(fDstProxy.proxy()));
    SkASSERT(processorAnalysis.requiresDstTexture() == SkToBool(dstProxy.proxy()));
    if (fList.head()->classID() != list->head()->classID() ||
        SkToBool(fAppliedClip) != SkToBool(appliedClip) ||
        (fAppliedClip && *fAppliedClip != *appliedClip) ||
        (fProcessorAnalysis.requiresNonOverlappingDraws() !=
                processorAnalysis.requiresNonOverlappingDraws()) ||
        (fProcessorAnalysis.requiresNonOverlappingDraws() &&
                // Non-overlapping draws are only required when Ganesh will either insert a
                // barrier, or read back a new dst texture between draws. In either case, we can
                // neither chain nor combine overlapping Ops.
                GrRectsTouchOrOverlap(fBounds, bounds)) ||
        (fProcessorAnalysis.requiresDstTexture() != processorAnalysis.requiresDstTexture()) ||
        (fProcessorAnalysis.requiresDstTexture() && fDstProxy != dstProxy)) {
        return false;
    }

    SkDEBUGCODE(bool first = true;)
    do {
        switch (fList.tail()->combineIfPossible(list->head(), caps)) {
            case GrOp::CombineResult::kCannotCombine:
                // If an op supports chaining then it is required that chaining is transitive and
                // that if any two ops in two different chains can merge then the two chains
                // may also be chained together. Thus, we should only hit this on the first
                // iteration.
                SkASSERT(first);
                return false;
            case GrOp::CombineResult::kMayChain:
                fList = DoConcat(std::move(fList), skstd::exchange(*list, List()), caps, pool,
                                 auditTrail);
                // The above exchange cleared out 'list'. The list needs to be empty now for the
                // loop to terminate.
                SkASSERT(list->empty());
                break;
            case GrOp::CombineResult::kMerged: {
                GrOP_INFO("\t\t: (%s opID: %u) -> Combining with (%s, opID: %u)\n",
                          list->tail()->name(), list->tail()->uniqueID(), list->head()->name(),
                          list->head()->uniqueID());
                GR_AUDIT_TRAIL_OPS_RESULT_COMBINED(auditTrail, fList.tail(), list->head());
                pool->release(list->popHead());
                break;
            }
        }
        SkDEBUGCODE(first = false);
    } while (!list->empty());

    // The new ops were successfully merged and/or chained onto our own.
    fBounds.joinPossiblyEmptyRect(bounds);
    return true;
}
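// Attempts to prepend the ops of 'that' chain onto this chain. On success this chain takes
// ownership of the combined op list, 'that' is emptied, and true is returned; otherwise both
// chains are left unchanged and false is returned.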
bool GrRenderTargetOpList::OpChain::prependChain(OpChain* that, const GrCaps& caps,
                                                 GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
    if (!that->tryConcat(
            &fList, fProcessorAnalysis, fDstProxy, fAppliedClip, fBounds, caps, pool, auditTrail)) {
        this->validate();
        // append failed
        return false;
    }

    // 'that' owns the combined chain. Move it into 'this'.
    SkASSERT(fList.empty());
    fList = std::move(that->fList);
    fBounds = that->fBounds;

    that->fDstProxy.setProxy(nullptr);
    if (that->fAppliedClip) {
        for (int i = 0; i < that->fAppliedClip->numClipCoverageFragmentProcessors(); ++i) {
            that->fAppliedClip->detachClipCoverageFragmentProcessor(i);
        }
    }
    this->validate();
    return true;
}
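// Attempts to add 'op' to this chain, either by merging it into an existing op or chaining it
// onto the tail. Returns nullptr if the op was absorbed; otherwise returns the op back to the
// caller.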
std::unique_ptr<GrOp> GrRenderTargetOpList::OpChain::appendOp(
        std::unique_ptr<GrOp> op, GrProcessorSet::Analysis processorAnalysis,
        const DstProxy* dstProxy, const GrAppliedClip* appliedClip, const GrCaps& caps,
        GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
    const GrXferProcessor::DstProxy noDstProxy;
    if (!dstProxy) {
        dstProxy = &noDstProxy;
    }
    SkASSERT(op->isChainHead() && op->isChainTail());
    SkRect opBounds = op->bounds();
    List chain(std::move(op));
    if (!this->tryConcat(
            &chain, processorAnalysis, *dstProxy, appliedClip, opBounds, caps, pool, auditTrail)) {
        // append failed, give the op back to the caller.
        this->validate();
        return chain.popHead();
    }

    SkASSERT(chain.empty());
    this->validate();
    return nullptr;
}

inline void GrRenderTargetOpList::OpChain::validate() const {
#ifdef SK_DEBUG
    fList.validate();
    for (const auto& op : GrOp::ChainRange<>(fList.head())) {
        // Not using SkRect::contains because we allow empty rects.
        SkASSERT(fBounds.fLeft <= op.bounds().fLeft && fBounds.fTop <= op.bounds().fTop &&
                 fBounds.fRight >= op.bounds().fRight && fBounds.fBottom >= op.bounds().fBottom);
    }
#endif
}
////////////////////////////////////////////////////////////////////////////////

GrRenderTargetOpList::GrRenderTargetOpList(sk_sp<GrOpMemoryPool> opMemoryPool,
                                           sk_sp<GrRenderTargetProxy> proxy,
                                           GrAuditTrail* auditTrail)
        : INHERITED(std::move(opMemoryPool), std::move(proxy), auditTrail)
        , fLastClipStackGenID(SK_InvalidUniqueID)
        SkDEBUGCODE(, fNumClips(0)) {
}

void GrRenderTargetOpList::deleteOps() {
    for (auto& chain : fOpChains) {
        chain.deleteOps(fOpMemoryPool.get());
    }
    fOpChains.reset();
}

GrRenderTargetOpList::~GrRenderTargetOpList() {
    this->deleteOps();
}

////////////////////////////////////////////////////////////////////////////////

#ifdef SK_DEBUG
void GrRenderTargetOpList::dump(bool printDependencies) const {
    INHERITED::dump(printDependencies);

    SkDebugf("ops (%d):\n", fOpChains.count());
    for (int i = 0; i < fOpChains.count(); ++i) {
        SkDebugf("*******************************\n");
        if (!fOpChains[i].head()) {
            SkDebugf("%d: <combined forward or failed instantiation>\n", i);
        } else {
            SkDebugf("%d: %s\n", i, fOpChains[i].head()->name());
            SkRect bounds = fOpChains[i].bounds();
            SkDebugf("ClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
                     bounds.fTop, bounds.fRight, bounds.fBottom);
            for (const auto& op : GrOp::ChainRange<>(fOpChains[i].head())) {
                SkString info = SkTabString(op.dumpInfo(), 1);
                SkDebugf("%s\n", info.c_str());
                bounds = op.bounds();
                SkDebugf("\tClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
                         bounds.fTop, bounds.fRight, bounds.fBottom);
            }
        }
    }
}

void GrRenderTargetOpList::visitProxies_debugOnly(const GrOp::VisitProxyFunc& func) const {
    for (const OpChain& chain : fOpChains) {
        chain.visitProxies(func);
    }
}
#endif
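// Called at flush time, before execution, so that each op chain can create any GPU resources
// its draws will need.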
void GrRenderTargetOpList::onPrepare(GrOpFlushState* flushState) {
    SkASSERT(fTarget->peekRenderTarget());
    SkASSERT(this->isClosed());
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
#endif

    // Loop over the ops that haven't yet been prepared.
    for (const auto& chain : fOpChains) {
        if (chain.head()) {
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
            TRACE_EVENT0("skia.gpu", chain.head()->name());
#endif
            GrOpFlushState::OpArgs opArgs = {
                chain.head(),
                fTarget->asRenderTargetProxy(),
                chain.appliedClip(),
                fTarget->asRenderTargetProxy()->outputSwizzle(),
                chain.dstProxy()
            };
            flushState->setOpArgs(&opArgs);
            chain.head()->prepare(flushState);
            flushState->setOpArgs(nullptr);
        }
    }
}
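// Creates the command buffer used to execute this opList's draws, configured with the requested
// color and stencil load ops.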
static GrGpuRTCommandBuffer* create_command_buffer(GrGpu* gpu,
                                                   GrRenderTarget* rt,
                                                   GrSurfaceOrigin origin,
                                                   const SkRect& bounds,
                                                   GrLoadOp colorLoadOp,
                                                   const SkPMColor4f& loadClearColor,
                                                   GrLoadOp stencilLoadOp) {
    const GrGpuRTCommandBuffer::LoadAndStoreInfo kColorLoadStoreInfo {
        colorLoadOp,
        GrStoreOp::kStore,
        loadClearColor
    };

    // TODO:
    // We would like to (at this level) only ever clear & discard. We would need
    // to stop splitting up higher level opLists for copyOps to achieve that.
    // Note: we would still need SB loads and stores but they would happen at a
    // lower level (inside the VK command buffer).
    const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo stencilLoadAndStoreInfo {
        stencilLoadOp,
        GrStoreOp::kStore,
    };

    return gpu->getCommandBuffer(rt, origin, bounds, kColorLoadStoreInfo, stencilLoadAndStoreInfo);
}
// TODO: this is where GrOp::renderTarget is used (which is fine since it
// is at flush time). However, we need to store the RenderTargetProxy in the
// Ops and instantiate them here.
bool GrRenderTargetOpList::onExecute(GrOpFlushState* flushState) {
    // TODO: Forcing the execution of the discard here isn't ideal since it will cause us to do a
    // discard and then store the data back in memory so that the load op on future draws doesn't
    // think the memory is uninitialized. Ideally we would want a system where we are tracking
    // whether the proxy itself has valid data or not, and then use that as a signal on whether we
    // should be loading or discarding. In that world we wouldn't need to worry about executing
    // oplists with no ops just to do a discard.
    if (fOpChains.empty() && GrLoadOp::kClear != fColorLoadOp &&
        GrLoadOp::kDiscard != fColorLoadOp) {
        return false;
    }

    SkASSERT(fTarget->peekRenderTarget());
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    // TODO: at the very least, we want the stencil store op to always be discard (at this
    // level). In Vulkan, sub-command buffers would still need to load & store the stencil buffer.

    // Make sure load ops are not kClear if the GPU needs to use draws for clears
    SkASSERT(fColorLoadOp != GrLoadOp::kClear ||
             !flushState->gpu()->caps()->performColorClearsAsDraws());
    SkASSERT(fStencilLoadOp != GrLoadOp::kClear ||
             !flushState->gpu()->caps()->performStencilClearsAsDraws());
    GrGpuRTCommandBuffer* commandBuffer = create_command_buffer(
                                                    flushState->gpu(),
                                                    fTarget->peekRenderTarget(),
                                                    fTarget->origin(),
                                                    fTarget->getBoundsRect(),
                                                    fColorLoadOp,
                                                    fLoadClearColor,
                                                    fStencilLoadOp);
    flushState->setCommandBuffer(commandBuffer);
    commandBuffer->begin();

    // Draw all the generated geometry.
    for (const auto& chain : fOpChains) {
        if (!chain.head()) {
            continue;
        }
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        TRACE_EVENT0("skia.gpu", chain.head()->name());
#endif
        GrOpFlushState::OpArgs opArgs {
            chain.head(),
            fTarget->asRenderTargetProxy(),
            chain.appliedClip(),
            fTarget->asRenderTargetProxy()->outputSwizzle(),
            chain.dstProxy()
        };
        flushState->setOpArgs(&opArgs);
        chain.head()->execute(flushState, chain.bounds());
        flushState->setOpArgs(nullptr);
    }

    commandBuffer->end();
    flushState->gpu()->submit(commandBuffer);
    flushState->setCommandBuffer(nullptr);

    return true;
}
void GrRenderTargetOpList::endFlush() {
    fLastClipStackGenID = SK_InvalidUniqueID;
    this->deleteOps();
    fClipAllocator.reset();
    INHERITED::endFlush();
}

void GrRenderTargetOpList::discard() {
    // Discard calls to in-progress opLists are ignored. Calls at the start update the
    // opLists' color & stencil load ops.
    if (this->isEmpty()) {
        fColorLoadOp = GrLoadOp::kDiscard;
        fStencilLoadOp = GrLoadOp::kDiscard;
    }
}

void GrRenderTargetOpList::setColorLoadOp(GrLoadOp op, const SkPMColor4f& color) {
    fColorLoadOp = op;
    fLoadClearColor = color;
}

bool GrRenderTargetOpList::resetForFullscreenClear(CanDiscardPreviousOps canDiscardPreviousOps) {
    // Mark the color load op as discard (this may be followed by a clearColorOnLoad call to make
    // the load op kClear, or it may be followed by an explicit op). In the event of an absClear()
    // after a regular clear(), we could end up with a clear load op and a real clear op in the
    // list if the load op were not reset here.
    fColorLoadOp = GrLoadOp::kDiscard;

    // If we previously recorded a wait op, we cannot delete the wait op. Until we track the wait
    // ops separately from normal ops, we have to avoid clearing out any ops in this case as well.
    if (fHasWaitOp) {
        canDiscardPreviousOps = CanDiscardPreviousOps::kNo;
    }

    if (CanDiscardPreviousOps::kYes == canDiscardPreviousOps || this->isEmpty()) {
        this->deleteOps();
        fDeferredProxies.reset();

        // If the opList is using a render target which wraps a vulkan command buffer, we can't
        // do a clear load since we cannot change the render pass that we are using. Thus we fall
        // back to making a clear op in this case.
        return !fTarget->asRenderTargetProxy()->wrapsVkSecondaryCB();
    }

    // Could not empty the list, so an op must be added to handle the clear
    return false;
}

////////////////////////////////////////////////////////////////////////////////

// This closely parallels GrTextureOpList::copySurface but renderTargetOpLists
// also store the applied clip and dest proxy with the op
bool GrRenderTargetOpList::copySurface(GrRecordingContext* context,
                                       GrSurfaceProxy* dst,
                                       GrSurfaceProxy* src,
                                       const SkIRect& srcRect,
                                       const SkIPoint& dstPoint) {
    SkASSERT(dst->asRenderTargetProxy() == fTarget.get());
    std::unique_ptr<GrOp> op = GrCopySurfaceOp::Make(context, dst, src, srcRect, dstPoint);
    if (!op) {
        return false;
    }

    this->addOp(std::move(op), *context->priv().caps());
    return true;
}
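// Drops any op chain that references a proxy which failed to instantiate, since such ops
// cannot be executed.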
void GrRenderTargetOpList::purgeOpsWithUninstantiatedProxies() {
    bool hasUninstantiatedProxy = false;
    auto checkInstantiation = [&hasUninstantiatedProxy](GrSurfaceProxy* p, GrMipMapped) {
        if (!p->isInstantiated()) {
            hasUninstantiatedProxy = true;
        }
    };
    for (OpChain& recordedOp : fOpChains) {
        hasUninstantiatedProxy = false;
        recordedOp.visitProxies(checkInstantiation);
        if (hasUninstantiatedProxy) {
            // When instantiation of the proxy fails we drop the Op
            recordedOp.deleteOps(fOpMemoryPool.get());
        }
    }
}
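// Reports whether any op chain in this list references 'proxyToCheck'.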
bool GrRenderTargetOpList::onIsUsed(GrSurfaceProxy* proxyToCheck) const {
    bool used = false;

    auto visit = [proxyToCheck, &used](GrSurfaceProxy* p, GrMipMapped) {
        if (p == proxyToCheck) {
            used = true;
        }
    };
    for (const OpChain& recordedOp : fOpChains) {
        recordedOp.visitProxies(visit);
    }

    return used;
}
void GrRenderTargetOpList::gatherProxyIntervals(GrResourceAllocator* alloc) const {
    for (int i = 0; i < fDeferredProxies.count(); ++i) {
        SkASSERT(!fDeferredProxies[i]->isInstantiated());
        // We give all the deferred proxies a write usage at the very start of flushing. This
        // locks them out of being reused for the entire flush until they are read - and then
        // they can be recycled. This is a bit unfortunate because a flush can proceed in waves
        // with sub-flushes. The deferred proxies only need to be pinned from the start of
        // the sub-flush in which they appear.
        alloc->addInterval(fDeferredProxies[i], 0, 0, GrResourceAllocator::ActualUse::kNo);
    }

    // Add the interval for all the writes to this opList's target
    if (fOpChains.count()) {
        unsigned int cur = alloc->curOp();

        alloc->addInterval(fTarget.get(), cur, cur + fOpChains.count() - 1,
                           GrResourceAllocator::ActualUse::kYes);
    } else {
        // This can happen if there is a loadOp (e.g., a clear) but no other draws. In this case
        // we still need to add an interval for the destination so we create a fake op# for
        // the missing clear op.
        alloc->addInterval(fTarget.get(), alloc->curOp(), alloc->curOp(),
                           GrResourceAllocator::ActualUse::kYes);
        alloc->incOps();
    }

    auto gather = [alloc SkDEBUGCODE(, this)](GrSurfaceProxy* p, GrMipMapped) {
        alloc->addInterval(p, alloc->curOp(), alloc->curOp(), GrResourceAllocator::ActualUse::kYes
                           SkDEBUGCODE(, fTarget.get() == p));
    };
    for (const OpChain& recordedOp : fOpChains) {
        // only diff from the GrTextureOpList version
        recordedOp.visitProxies(gather);

        // Even though the op may have been (re)moved we still need to increment the op count to
        // keep all the math consistent.
        alloc->incOps();
    }
}
void GrRenderTargetOpList::recordOp(
        std::unique_ptr<GrOp> op, GrProcessorSet::Analysis processorAnalysis, GrAppliedClip* clip,
        const DstProxy* dstProxy, const GrCaps& caps) {
    SkDEBUGCODE(op->validate();)
    SkASSERT(processorAnalysis.requiresDstTexture() == (dstProxy && dstProxy->proxy()));
    SkASSERT(fTarget);

    // A closed GrOpList should never receive new/more ops
    SkASSERT(!this->isClosed());
    if (!op->bounds().isFinite()) {
        fOpMemoryPool->release(std::move(op));
        return;
    }

    // Check if there is an op we can combine with by linearly searching back until we either
    // 1) check every op
    // 2) intersect with something
    // 3) find a 'blocker'
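    // For example, a new draw whose bounds don't overlap the most recently recorded chains may
    // step past them and merge into an earlier compatible chain; the first chain whose bounds
    // overlap the op acts as a 'blocker' that ends the search.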
    GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), fTarget->uniqueID());
    GrOP_INFO("opList: %d Recording (%s, opID: %u)\n"
              "\tBounds [L: %.2f, T: %.2f R: %.2f B: %.2f]\n",
              this->uniqueID(),
              op->name(),
              op->uniqueID(),
              op->bounds().fLeft, op->bounds().fTop,
              op->bounds().fRight, op->bounds().fBottom);
    GrOP_INFO(SkTabString(op->dumpInfo(), 1).c_str());
    GrOP_INFO("\tOutcome:\n");
    int maxCandidates = SkTMin(kMaxOpChainDistance, fOpChains.count());
    if (maxCandidates) {
        int i = 0;
        while (true) {
            OpChain& candidate = fOpChains.fromBack(i);
            op = candidate.appendOp(std::move(op), processorAnalysis, dstProxy, clip, caps,
                                    fOpMemoryPool.get(), fAuditTrail);
            if (!op) {
                return;
            }
            // Stop going backwards if we would cause a painter's order violation.
            if (!can_reorder(candidate.bounds(), op->bounds())) {
                GrOP_INFO("\t\tBackward: Intersects with chain (%s, head opID: %u)\n",
                          candidate.head()->name(), candidate.head()->uniqueID());
                break;
            }
            if (++i == maxCandidates) {
                GrOP_INFO("\t\tBackward: Reached max lookback or beginning of op array %d\n", i);
                break;
            }
        }
    } else {
        GrOP_INFO("\t\tBackward: FirstOp\n");
    }
    if (clip) {
        clip = fClipAllocator.make<GrAppliedClip>(std::move(*clip));
        SkDEBUGCODE(fNumClips++;)
    }
    fOpChains.emplace_back(std::move(op), processorAnalysis, clip, dstProxy);
}
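// Forward combining pass over the recorded chains: attempts to prepend each chain onto a later
// compatible chain within the lookahead window, leaving the absorbed chain's slot empty.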
void GrRenderTargetOpList::forwardCombine(const GrCaps& caps) {
    SkASSERT(!this->isClosed());
    GrOP_INFO("opList: %d ForwardCombine %d ops:\n", this->uniqueID(), fOpChains.count());

    for (int i = 0; i < fOpChains.count() - 1; ++i) {
        OpChain& chain = fOpChains[i];
        int maxCandidateIdx = SkTMin(i + kMaxOpChainDistance, fOpChains.count() - 1);
        int j = i + 1;
        while (true) {
            OpChain& candidate = fOpChains[j];
            if (candidate.prependChain(&chain, caps, fOpMemoryPool.get(), fAuditTrail)) {
                break;
            }
            // Stop traversing if we would cause a painter's order violation.
            if (!can_reorder(chain.bounds(), candidate.bounds())) {
                GrOP_INFO(
                        "\t\t%d: chain (%s head opID: %u) -> "
                        "Intersects with chain (%s, head opID: %u)\n",
                        i, chain.head()->name(), chain.head()->uniqueID(),
                        candidate.head()->name(), candidate.head()->uniqueID());
                break;
            }
            if (++j > maxCandidateIdx) {
                GrOP_INFO("\t\t%d: chain (%s opID: %u) -> Reached max lookahead or end of array\n",
                          i, chain.head()->name(), chain.head()->uniqueID());
                break;
            }
        }
    }
}