GrCoverageCountingPathRenderer.cpp

/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"

#include "include/pathops/SkPathOps.h"
#include "src/core/SkMakeUnique.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrClip.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/ccpr/GrCCClipProcessor.h"
#include "src/gpu/ccpr/GrCCDrawPathsOp.h"
#include "src/gpu/ccpr/GrCCPathCache.h"

using PathInstance = GrCCPathProcessor::Instance;
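
// Checks the GPU caps required by CCPR and, when supported, reports which coverage mode will be
// used: an FP16 coverage-count atlas when coverage counting is allowed and half-float alpha
// configs are texturable and renderable, otherwise an A8 multisample atlas when MSAA, sample
// locations, and stencil sample variables are available.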
bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps, CoverageType* coverageType) {
    const GrShaderCaps& shaderCaps = *caps.shaderCaps();
    if (caps.driverBlacklistCCPR() || !shaderCaps.integerSupport() ||
            !caps.instanceAttribSupport() || !shaderCaps.floatIs32Bits() ||
            GrCaps::kNone_MapFlags == caps.mapBufferFlags() ||
            !caps.isConfigTexturable(kAlpha_8_GrPixelConfig) ||
            !caps.isConfigRenderable(kAlpha_8_GrPixelConfig) ||
            !caps.halfFloatVertexAttributeSupport()) {
        return false;
    }

    if (caps.allowCoverageCounting() &&
            caps.isConfigTexturable(kAlpha_half_GrPixelConfig) &&
            caps.isConfigRenderable(kAlpha_half_GrPixelConfig)) {
        if (coverageType) {
            *coverageType = CoverageType::kFP16_CoverageCount;
        }
        return true;
    }

    if (!caps.driverBlacklistMSAACCPR() &&
            caps.internalMultisampleCount(kAlpha_8_GrPixelConfig) > 1 &&
            caps.sampleLocationsSupport() &&
            shaderCaps.sampleVariablesStencilSupport()) {
        if (coverageType) {
            *coverageType = CoverageType::kA8_Multisample;
        }
        return true;
    }

    return false;
}
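
// Factory method: returns a new CCPR instance when IsSupported() passes, otherwise null.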
sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
        const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
    CoverageType coverageType;
    if (IsSupported(caps, &coverageType)) {
        return sk_sp<GrCoverageCountingPathRenderer>(new GrCoverageCountingPathRenderer(
                coverageType, allowCaching, contextUniqueID));
    }
    return nullptr;
}

GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(
        CoverageType coverageType, AllowCaching allowCaching, uint32_t contextUniqueID)
        : fCoverageType(coverageType) {
    if (AllowCaching::kYes == allowCaching) {
        fPathCache = skstd::make_unique<GrCCPathCache>(contextUniqueID);
    }
}
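
// Returns the pending-paths record for the given opList ID, creating it on first lookup.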
GrCCPerOpListPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opListID) {
    auto it = fPendingPaths.find(opListID);
    if (fPendingPaths.end() == it) {
        sk_sp<GrCCPerOpListPaths> paths = sk_make_sp<GrCCPerOpListPaths>();
        it = fPendingPaths.insert(std::make_pair(opListID, std::move(paths))).first;
    }
    return it->second.get();
}
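
// Triages a draw: kNo for cases CCPR cannot handle (perspective, path effects, inverse fills,
// unsupported strokes), kAsBackup for paths better left to other renderers when possible, and
// kYes otherwise.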
GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
        const CanDrawPathArgs& args) const {
    const GrShape& shape = *args.fShape;
    // We use "kCoverage", or analytic AA, no matter what coverage type our atlas uses: Even if the
    // atlas is multisampled, that resolves into analytic coverage before we draw the path to the
    // main canvas.
    if (GrAAType::kCoverage != args.fAAType || shape.style().hasPathEffect() ||
            args.fViewMatrix->hasPerspective() || shape.inverseFilled()) {
        return CanDrawPath::kNo;
    }

    SkPath path;
    shape.asPath(&path);

    const SkStrokeRec& stroke = shape.style().strokeRec();
    switch (stroke.getStyle()) {
        case SkStrokeRec::kFill_Style: {
            SkRect devBounds;
            args.fViewMatrix->mapRect(&devBounds, path.getBounds());

            SkIRect clippedIBounds;
            devBounds.roundOut(&clippedIBounds);
            if (!clippedIBounds.intersect(*args.fClipConservativeBounds)) {
                // The path is completely clipped away. Our code will eventually notice this before
                // doing any real work.
                return CanDrawPath::kYes;
            }

            int64_t numPixels = sk_64_mul(clippedIBounds.height(), clippedIBounds.width());
            if (path.countVerbs() > 1000 && path.countPoints() > numPixels) {
                // This is a complicated path that has more vertices than pixels! Let's let the SW
                // renderer have this one: It will probably be faster and a bitmap will require
                // less total memory on the GPU than CCPR instance buffers would for the raw path
                // data.
                return CanDrawPath::kNo;
            }

            if (numPixels > 256 * 256) {
                // Large paths can blow up the atlas fast. And they are not ideal for a two-pass
                // rendering algorithm. Give the simpler direct renderers a chance before we commit
                // to drawing it.
                return CanDrawPath::kAsBackup;
            }

            if (args.fShape->hasUnstyledKey() && path.countVerbs() > 50) {
                // Complex paths do better cached in an SDF, if the renderer will accept them.
                return CanDrawPath::kAsBackup;
            }

            return CanDrawPath::kYes;
        }

        case SkStrokeRec::kStroke_Style:
            if (!args.fViewMatrix->isSimilarity()) {
                // The stroker currently only supports rigid-body transforms for the stroke lines
                // themselves. This limitation doesn't affect hairlines since their stroke lines
                // are defined relative to device space.
                return CanDrawPath::kNo;
            }
            // fallthru
        case SkStrokeRec::kHairline_Style: {
            if (CoverageType::kFP16_CoverageCount != fCoverageType) {
                // Stroking is not yet supported in MSAA atlas mode.
                return CanDrawPath::kNo;
            }
            float inflationRadius;
            GetStrokeDevWidth(*args.fViewMatrix, stroke, &inflationRadius);
            if (!(inflationRadius <= kMaxBoundsInflationFromStroke)) {
                // Let extremely wide strokes be converted to fill paths and drawn by the CCPR
                // filler instead. (Cast the logic negatively in order to also catch r=NaN.)
                return CanDrawPath::kNo;
            }
            SkASSERT(!SkScalarIsNaN(inflationRadius));
            if (SkPathPriv::ConicWeightCnt(path)) {
                // The stroker does not support conics yet.
                return CanDrawPath::kNo;
            }
            return CanDrawPath::kYes;
        }

        case SkStrokeRec::kStrokeAndFill_Style:
            return CanDrawPath::kNo;
    }

    SK_ABORT("Invalid stroke style.");
    return CanDrawPath::kNo;
}
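
// Wraps the path in a GrCCDrawPathsOp, bounded by the conservative clip bounds, and records it on
// the render target context.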
bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
    SkASSERT(!fFlushing);

    SkIRect clipIBounds;
    GrRenderTargetContext* rtc = args.fRenderTargetContext;
    args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &clipIBounds, nullptr);

    auto op = GrCCDrawPathsOp::Make(args.fContext, clipIBounds, *args.fViewMatrix, *args.fShape,
                                    std::move(args.fPaint));
    this->recordOp(std::move(op), args);
    return true;
}
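
// Adds the op to the render target context; the callback attaches the op to the
// GrCCPerOpListPaths set that owns the paths for its opList.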
void GrCoverageCountingPathRenderer::recordOp(std::unique_ptr<GrCCDrawPathsOp> op,
                                              const DrawPathArgs& args) {
    if (op) {
        auto addToOwningPerOpListPaths = [this](GrOp* op, uint32_t opListID) {
            op->cast<GrCCDrawPathsOp>()->addToOwningPerOpListPaths(
                    sk_ref_sp(this->lookupPendingPaths(opListID)));
        };
        args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op),
                                             addToOwningPerOpListPaths);
    }
}
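
// Creates a fragment processor that applies the given device-space path as a clip. Clip paths are
// keyed on the path's generation ID (plus the fill rule in MSAA mode) so repeated clip accesses
// share one entry; paths larger than kPathCropThreshold are cropped to the max render target size
// first.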
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
        uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
        const GrCaps& caps) {
    SkASSERT(!fFlushing);

    uint32_t key = deviceSpacePath.getGenerationID();
    if (CoverageType::kA8_Multisample == fCoverageType) {
        // We only need to consider fill rule in MSAA mode. In coverage count mode Even/Odd and
        // Nonzero both reference the same coverage count mask.
        key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
    }
    GrCCClipPath& clipPath =
            this->lookupPendingPaths(opListID)->fClipPaths[key];
    if (!clipPath.isInitialized()) {
        // This ClipPath was just created during lookup. Initialize it.
        const SkRect& pathDevBounds = deviceSpacePath.getBounds();
        if (SkTMax(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
            // The path is too large. Crop it, or analytic AA can run out of fp32 precision.
            SkPath croppedPath;
            int maxRTSize = caps.maxRenderTargetSize();
            CropPath(deviceSpacePath, SkIRect::MakeWH(maxRTSize, maxRTSize), &croppedPath);
            clipPath.init(croppedPath, accessRect, fCoverageType, caps);
        } else {
            clipPath.init(deviceSpacePath, accessRect, fCoverageType, caps);
        }
    } else {
        clipPath.addAccess(accessRect);
    }

    auto isCoverageCount = GrCCClipProcessor::IsCoverageCount(
            CoverageType::kFP16_CoverageCount == fCoverageType);
    auto mustCheckBounds = GrCCClipProcessor::MustCheckBounds(
            !clipPath.pathDevIBounds().contains(accessRect));
    return skstd::make_unique<GrCCClipProcessor>(&clipPath, isCoverageCount, mustCheckBounds);
}
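
// Called once per flush: moves the pending paths for the flushed opLists into fFlushingPaths,
// tallies the atlas and instance-buffer requirements, decides whether reusable cached paths are
// worth copying into A8 atlases, lays out and renders the atlas(es), then hands the per-flush
// resources to each flushing path set.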
void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
                                              const uint32_t* opListIDs, int numOpListIDs,
                                              SkTArray<sk_sp<GrRenderTargetContext>>* out) {
    using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
    SkASSERT(!fFlushing);
    SkASSERT(fFlushingPaths.empty());
    SkDEBUGCODE(fFlushing = true);

    if (fPathCache) {
        fPathCache->doPreFlushProcessing();
    }

    if (fPendingPaths.empty()) {
        return;  // Nothing to draw.
    }

    GrCCPerFlushResourceSpecs specs;
    int maxPreferredRTSize = onFlushRP->caps()->maxPreferredRenderTargetSize();
    specs.fCopyAtlasSpecs.fMaxPreferredTextureSize = SkTMin(2048, maxPreferredRTSize);
    SkASSERT(0 == specs.fCopyAtlasSpecs.fMinTextureSize);
    specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
    specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(512, maxPreferredRTSize);

    // Move the per-opList paths that are about to be flushed from fPendingPaths to fFlushingPaths,
    // and count them up so we can preallocate buffers.
    fFlushingPaths.reserve(numOpListIDs);
    for (int i = 0; i < numOpListIDs; ++i) {
        auto iter = fPendingPaths.find(opListIDs[i]);
        if (fPendingPaths.end() == iter) {
            continue;  // No paths on this opList.
        }
        fFlushingPaths.push_back(std::move(iter->second));
        fPendingPaths.erase(iter);

        for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
            op->accountForOwnPaths(fPathCache.get(), onFlushRP, &specs);
        }
        for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
            clipsIter.second.accountForOwnPath(&specs);
        }
    }

    if (specs.isEmpty()) {
        return;  // Nothing to draw.
    }

    // Determine if there are enough reusable paths from last flush for it to be worth our time to
    // copy them to cached atlas(es).
    int numCopies = specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kFillIdx] +
                    specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kStrokeIdx];
    auto doCopies = DoCopiesToA8Coverage(numCopies > 100 ||
                                         specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
    if (numCopies && DoCopiesToA8Coverage::kNo == doCopies) {
        specs.cancelCopies();
    }

    auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, fCoverageType, specs);
    if (!resources->isMapped()) {
        return;  // Some allocation failed.
    }

    // Lay out the atlas(es) and parse paths.
    for (const auto& flushingPaths : fFlushingPaths) {
        for (GrCCDrawPathsOp* op : flushingPaths->fDrawOps) {
            op->setupResources(fPathCache.get(), onFlushRP, resources.get(), doCopies);
        }
        for (auto& clipsIter : flushingPaths->fClipPaths) {
            clipsIter.second.renderPathInAtlas(resources.get(), onFlushRP);
        }
    }

    if (fPathCache) {
        // Purge invalidated textures from previous atlases *before* calling finalize(). That way,
        // the underlying texture objects can be freed up and reused for the next atlases.
        fPathCache->purgeInvalidatedAtlasTextures(onFlushRP);
    }

    // Allocate resources and then render the atlas(es).
    if (!resources->finalize(onFlushRP, out)) {
        return;
    }

    // Commit flushing paths to the resources once they are successfully completed.
    for (auto& flushingPaths : fFlushingPaths) {
        SkASSERT(!flushingPaths->fFlushResources);
        flushingPaths->fFlushResources = resources;
    }
}
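
// Called after the flush completes: releases the per-flush resources and the flushing path sets
// now that their Ops and FPs are done accessing them.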
void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
                                               int numOpListIDs) {
    SkASSERT(fFlushing);

    if (!fFlushingPaths.empty()) {
        // In DDL mode these aren't guaranteed to be deleted so we must clear out the perFlush
        // resources manually.
        for (auto& flushingPaths : fFlushingPaths) {
            flushingPaths->fFlushResources = nullptr;
        }

        // We wait to erase these until after flush, once Ops and FPs are done accessing their
        // data.
        fFlushingPaths.reset();
    }

    SkDEBUGCODE(fFlushing = false);
}
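
// Forwards a time-based purge request to the path cache, if caching is enabled.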
void GrCoverageCountingPathRenderer::purgeCacheEntriesOlderThan(
        GrProxyProvider* proxyProvider, const GrStdSteadyClock::time_point& purgeTime) {
    if (fPathCache) {
        fPathCache->purgeEntriesOlderThan(proxyProvider, purgeTime);
    }
}
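
// Intersects the path with the crop box using SkPathOps; on failure the output is reset to an
// empty path.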
void GrCoverageCountingPathRenderer::CropPath(const SkPath& path, const SkIRect& cropbox,
                                              SkPath* out) {
    SkPath cropboxPath;
    cropboxPath.addRect(SkRect::Make(cropbox));
    if (!Op(cropboxPath, path, kIntersect_SkPathOp, out)) {
        // This can fail if the PathOps encounter NaN or infinities.
        out->reset();
    }
    out->setIsVolatile(true);
}
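
// Returns the stroke's width in device space (1 for hairlines). Optionally also reports the bounds
// inflation radius the stroke adds, computed for a minimum stroke width of 1px.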
float GrCoverageCountingPathRenderer::GetStrokeDevWidth(const SkMatrix& m,
                                                        const SkStrokeRec& stroke,
                                                        float* inflationRadius) {
    float strokeDevWidth;
    if (stroke.isHairlineStyle()) {
        strokeDevWidth = 1;
    } else {
        SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle());
        SkASSERT(m.isSimilarity());  // Otherwise matrixScaleFactor = m.getMaxScale().
        float matrixScaleFactor = SkVector::Length(m.getScaleX(), m.getSkewY());
        strokeDevWidth = stroke.getWidth() * matrixScaleFactor;
    }
    if (inflationRadius) {
        // Inflate for a minimum stroke width of 1. In some cases when the stroke is less than 1px
        // wide, we may inflate it to 1px and instead reduce the opacity.
        *inflationRadius = SkStrokeRec::GetInflationRadius(
                stroke.getJoin(), stroke.getMiter(), stroke.getCap(), SkTMax(strokeDevWidth, 1.f));
    }
    return strokeDevWidth;
}