/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "src/gpu/ccpr/GrCCDrawPathsOp.h"

#include "include/private/GrRecordingContext.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/ccpr/GrCCPathCache.h"
#include "src/gpu/ccpr/GrCCPerFlushResources.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/ccpr/GrOctoBounds.h"

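// Scans the paint's fragment processors for coord transforms. If no processor reads local
// coords, the constructor below substitutes the identity matrix for
// fViewMatrixIfUsingLocalCoords, which in turn lets more draws merge in onCombineIfPossible().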
static bool has_coord_transforms(const GrPaint& paint) {
    GrFragmentProcessor::Iter iter(paint);
    while (const GrFragmentProcessor* fp = iter.next()) {
        if (!fp->coordTransforms().empty()) {
            return true;
        }
    }
    return false;
}

std::unique_ptr<GrCCDrawPathsOp> GrCCDrawPathsOp::Make(
        GrRecordingContext* context, const SkIRect& clipIBounds, const SkMatrix& m,
        const GrShape& shape, GrPaint&& paint) {
    SkRect conservativeDevBounds;
    m.mapRect(&conservativeDevBounds, shape.bounds());

    const SkStrokeRec& stroke = shape.style().strokeRec();
    float strokeDevWidth = 0;
    float conservativeInflationRadius = 0;
    if (!stroke.isFillStyle()) {
        strokeDevWidth = GrCoverageCountingPathRenderer::GetStrokeDevWidth(
                m, stroke, &conservativeInflationRadius);
        conservativeDevBounds.outset(conservativeInflationRadius, conservativeInflationRadius);
    }

    std::unique_ptr<GrCCDrawPathsOp> op;
    float conservativeSize = SkTMax(conservativeDevBounds.height(), conservativeDevBounds.width());
    if (conservativeSize > GrCoverageCountingPathRenderer::kPathCropThreshold) {
        // The path is too large. Crop it or analytic AA can run out of fp32 precision.
        SkPath croppedDevPath;
        shape.asPath(&croppedDevPath);
        croppedDevPath.transform(m, &croppedDevPath);

        SkIRect cropBox = clipIBounds;
        GrShape croppedDevShape;
        if (stroke.isFillStyle()) {
            GrCoverageCountingPathRenderer::CropPath(croppedDevPath, cropBox, &croppedDevPath);
            croppedDevShape = GrShape(croppedDevPath);
            conservativeDevBounds = croppedDevShape.bounds();
        } else {
            int r = SkScalarCeilToInt(conservativeInflationRadius);
            cropBox.outset(r, r);
            GrCoverageCountingPathRenderer::CropPath(croppedDevPath, cropBox, &croppedDevPath);
            SkStrokeRec devStroke = stroke;
            devStroke.setStrokeStyle(strokeDevWidth);
            croppedDevShape = GrShape(croppedDevPath, GrStyle(devStroke, nullptr));
            conservativeDevBounds = croppedDevPath.getBounds();
            conservativeDevBounds.outset(conservativeInflationRadius, conservativeInflationRadius);
        }

        // FIXME: This breaks local coords: http://skbug.com/8003
        return InternalMake(context, clipIBounds, SkMatrix::I(), croppedDevShape, strokeDevWidth,
                            conservativeDevBounds, std::move(paint));
    }

    return InternalMake(context, clipIBounds, m, shape, strokeDevWidth, conservativeDevBounds,
                        std::move(paint));
}

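// Note on the crop above: for fills, cropping to clipIBounds is safe because the clip already
// discards everything outside it. For strokes, the crop box is first outset by the conservative
// stroke inflation radius so that geometry just outside the clip can still contribute stroked
// pixels inside it.
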
std::unique_ptr<GrCCDrawPathsOp> GrCCDrawPathsOp::InternalMake(
        GrRecordingContext* context, const SkIRect& clipIBounds, const SkMatrix& m,
        const GrShape& shape, float strokeDevWidth, const SkRect& conservativeDevBounds,
        GrPaint&& paint) {
    // The path itself should have been cropped if larger than kPathCropThreshold. If it had a
    // stroke, that would have further inflated its draw bounds.
    SkASSERT(SkTMax(conservativeDevBounds.height(), conservativeDevBounds.width()) <
             GrCoverageCountingPathRenderer::kPathCropThreshold +
             GrCoverageCountingPathRenderer::kMaxBoundsInflationFromStroke*2 + 1);

    SkIRect shapeConservativeIBounds;
    conservativeDevBounds.roundOut(&shapeConservativeIBounds);

    SkIRect maskDevIBounds;
    if (!maskDevIBounds.intersect(clipIBounds, shapeConservativeIBounds)) {
        return nullptr;
    }

    GrOpMemoryPool* pool = context->priv().opMemoryPool();
    return pool->allocate<GrCCDrawPathsOp>(m, shape, strokeDevWidth, shapeConservativeIBounds,
                                           maskDevIBounds, conservativeDevBounds,
                                           std::move(paint));
}

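// From here on, each draw tracks two integer bounds: shapeConservativeIBounds (the whole path's
// device bounds, rounded out) and maskDevIBounds (the portion visible through the clip). The
// caching heuristics below compare the two to decide how much of the mask is worth rendering.
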
GrCCDrawPathsOp::GrCCDrawPathsOp(const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
                                 const SkIRect& shapeConservativeIBounds,
                                 const SkIRect& maskDevIBounds,
                                 const SkRect& conservativeDevBounds, GrPaint&& paint)
        : GrDrawOp(ClassID())
        , fViewMatrixIfUsingLocalCoords(has_coord_transforms(paint) ? m : SkMatrix::I())
        , fDraws(m, shape, strokeDevWidth, shapeConservativeIBounds, maskDevIBounds,
                 paint.getColor4f())
        , fProcessors(std::move(paint)) {  // Paint must be moved after fetching its color above.
    SkDEBUGCODE(fBaseInstance = -1);
    // If the path is clipped, CCPR will only draw the visible portion. This helps improve
    // batching, since it eliminates the need for scissor when drawing to the main canvas.
    // FIXME: We should parse the path right here. It will provide a tighter bounding box for us
    // to give the opList, as well as enabling threaded parsing when using DDL.
    SkRect clippedDrawBounds;
    if (!clippedDrawBounds.intersect(conservativeDevBounds, SkRect::Make(maskDevIBounds))) {
        clippedDrawBounds.setEmpty();
    }
    // We always have AA bloat, even in MSAA atlas mode. This is because by the time this Op
    // comes along and draws to the main canvas, the atlas has been resolved to analytic
    // coverage.
    this->setBounds(clippedDrawBounds, GrOp::HasAABloat::kYes, GrOp::IsZeroArea::kNo);
}

GrCCDrawPathsOp::~GrCCDrawPathsOp() {
    if (fOwningPerOpListPaths) {
        // Remove the list's dangling pointer to this Op before deleting it.
        fOwningPerOpListPaths->fDrawOps.remove(this);
    }
}

GrCCDrawPathsOp::SingleDraw::SingleDraw(const SkMatrix& m, const GrShape& shape,
                                        float strokeDevWidth,
                                        const SkIRect& shapeConservativeIBounds,
                                        const SkIRect& maskDevIBounds, const SkPMColor4f& color)
        : fMatrix(m)
        , fShape(shape)
        , fStrokeDevWidth(strokeDevWidth)
        , fShapeConservativeIBounds(shapeConservativeIBounds)
        , fMaskDevIBounds(maskDevIBounds)
        , fColor(color) {
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (fShape.hasUnstyledKey()) {
        // On AOSP we round view matrix translates to integer values for cacheable paths. We do
        // this to match HWUI's cache hit ratio, which doesn't consider the matrix when caching
        // paths.
        fMatrix.setTranslateX(SkScalarRoundToScalar(fMatrix.getTranslateX()));
        fMatrix.setTranslateY(SkScalarRoundToScalar(fMatrix.getTranslateY()));
    }
#endif
}

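// For example, under SK_BUILD_FOR_ANDROID_FRAMEWORK a cacheable path drawn with translations
// (10.3, 20.7) and (10.4, 20.6) both snap to (10, 21), so the two draws share one cached mask
// at the cost of up to a half-pixel of positional error.
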
GrProcessorSet::Analysis GrCCDrawPathsOp::finalize(
        const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
        GrClampType clampType) {
    SkASSERT(1 == fNumDraws);  // There should only be one single path draw in this Op right now.
    return fDraws.head().finalize(caps, clip, hasMixedSampledCoverage, clampType, &fProcessors);
}

GrProcessorSet::Analysis GrCCDrawPathsOp::SingleDraw::finalize(
        const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
        GrClampType clampType, GrProcessorSet* processors) {
    const GrProcessorSet::Analysis& analysis = processors->finalize(
            fColor, GrProcessorAnalysisCoverage::kSingleChannel, clip,
            &GrUserStencilSettings::kUnused, hasMixedSampledCoverage, caps, clampType, &fColor);

    // Lines start looking jagged when they get thinner than 1px. For thin strokes it looks
    // better if we can convert them to hairline (i.e., inflate the stroke width to 1px), and
    // instead reduce the opacity to create the illusion of thinness. This strategy also helps
    // reduce artifacts from coverage dilation when there are self intersections.
    if (analysis.isCompatibleWithCoverageAsAlpha() &&
        !fShape.style().strokeRec().isFillStyle() && fStrokeDevWidth < 1) {
        // Modifying the shape affects its cache key. The draw can't have a cache entry yet or
        // else our next step would invalidate it.
        SkASSERT(!fCacheEntry);
        SkASSERT(SkStrokeRec::kStroke_Style == fShape.style().strokeRec().getStyle());

        SkPath path;
        fShape.asPath(&path);

        // Create a hairline version of our stroke.
        SkStrokeRec hairlineStroke = fShape.style().strokeRec();
        hairlineStroke.setStrokeStyle(0);

        // How transparent does a 1px stroke have to be in order to appear as thin as the real
        // one?
        float coverage = fStrokeDevWidth;

        fShape = GrShape(path, GrStyle(hairlineStroke, nullptr));
        fStrokeDevWidth = 1;

        // fShapeConservativeIBounds already accounted for this possibility of inflating the
        // stroke.
        fColor = fColor * coverage;
    }

    return analysis;
}

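// E.g., a 0.25px-wide stroke becomes a 1px hairline drawn at 25% of the original alpha: the
// stroke covers roughly 4x the pixels at 1/4 the opacity, so its total apparent ink stays about
// constant while avoiding sub-pixel jaggies.
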
GrOp::CombineResult GrCCDrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps&) {
    GrCCDrawPathsOp* that = op->cast<GrCCDrawPathsOp>();
    SkASSERT(fOwningPerOpListPaths);
    SkASSERT(fNumDraws);
    SkASSERT(!that->fOwningPerOpListPaths ||
             that->fOwningPerOpListPaths == fOwningPerOpListPaths);
    SkASSERT(that->fNumDraws);

    if (fProcessors != that->fProcessors ||
        fViewMatrixIfUsingLocalCoords != that->fViewMatrixIfUsingLocalCoords) {
        return CombineResult::kCannotCombine;
    }

    fDraws.append(std::move(that->fDraws), &fOwningPerOpListPaths->fAllocator);

    SkDEBUGCODE(fNumDraws += that->fNumDraws);
    SkDEBUGCODE(that->fNumDraws = 0);
    return CombineResult::kMerged;
}

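// Merging is only refused when the paint processors or the local-coord matrices differ;
// everything else (path geometry, color, mask bounds) lives per-SingleDraw and is simply
// appended onto this op's draw list.
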
void GrCCDrawPathsOp::addToOwningPerOpListPaths(sk_sp<GrCCPerOpListPaths> owningPerOpListPaths) {
    SkASSERT(1 == fNumDraws);
    SkASSERT(!fOwningPerOpListPaths);
    fOwningPerOpListPaths = std::move(owningPerOpListPaths);
    fOwningPerOpListPaths->fDrawOps.addToTail(this);
}

void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
                                         GrOnFlushResourceProvider* onFlushRP,
                                         GrCCPerFlushResourceSpecs* specs) {
    for (SingleDraw& draw : fDraws) {
        draw.accountForOwnPath(pathCache, onFlushRP, specs);
    }
}

void GrCCDrawPathsOp::SingleDraw::accountForOwnPath(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
        GrCCPerFlushResourceSpecs* specs) {
    using CoverageType = GrCCAtlas::CoverageType;

    SkPath path;
    fShape.asPath(&path);

    SkASSERT(!fCacheEntry);

    if (pathCache) {
        fCacheEntry = pathCache->find(
                onFlushRP, fShape, fMaskDevIBounds, fMatrix, &fCachedMaskShift);
    }

    if (fCacheEntry) {
        if (const GrCCCachedAtlas* cachedAtlas = fCacheEntry->cachedAtlas()) {
            SkASSERT(cachedAtlas->getOnFlushProxy());
            if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
                ++specs->fNumCachedPaths;
            } else {
                // Suggest that this path be copied to a literal coverage atlas, to save memory.
                // (The client may decline this copy via DoCopiesToA8Coverage::kNo.)
                int idx = (fShape.style().strokeRec().isFillStyle())
                        ? GrCCPerFlushResourceSpecs::kFillIdx
                        : GrCCPerFlushResourceSpecs::kStrokeIdx;
                ++specs->fNumCopiedPaths[idx];
                specs->fCopyPathStats[idx].statPath(path);
                specs->fCopyAtlasSpecs.accountForSpace(fCacheEntry->width(),
                                                       fCacheEntry->height());
                fDoCopyToA8Coverage = true;
            }
            return;
        }

        if (this->shouldCachePathMask(onFlushRP->caps()->maxRenderTargetSize())) {
            fDoCachePathMask = true;
            // We don't cache partial masks; ensure the bounds include the entire path.
            fMaskDevIBounds = fShapeConservativeIBounds;
        }
    }

    // Plan on rendering this path in a new atlas.
    int idx = (fShape.style().strokeRec().isFillStyle())
            ? GrCCPerFlushResourceSpecs::kFillIdx
            : GrCCPerFlushResourceSpecs::kStrokeIdx;
    ++specs->fNumRenderedPaths[idx];
    specs->fRenderedPathStats[idx].statPath(path);
    specs->fRenderedAtlasSpecs.accountForSpace(fMaskDevIBounds.width(), fMaskDevIBounds.height());
    SkDEBUGCODE(fWasCountedAsRender = true);
}

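// Each draw thus lands in exactly one of three accounting buckets: already cached in an A8
// literal-coverage atlas (fNumCachedPaths), cached but slated for a copy to A8 coverage
// (fNumCopiedPaths), or rendered from scratch into a new atlas (fNumRenderedPaths).
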
bool GrCCDrawPathsOp::SingleDraw::shouldCachePathMask(int maxRenderTargetSize) const {
    SkASSERT(fCacheEntry);
    SkASSERT(!fCacheEntry->cachedAtlas());
    if (fCacheEntry->hitCount() <= 1) {
        return false;  // Don't cache a path mask until at least its second hit.
    }

    int shapeMaxDimension = SkTMax(
            fShapeConservativeIBounds.height(), fShapeConservativeIBounds.width());
    if (shapeMaxDimension > maxRenderTargetSize) {
        return false;  // This path isn't cacheable.
    }

    int64_t shapeArea = sk_64_mul(
            fShapeConservativeIBounds.height(), fShapeConservativeIBounds.width());
    if (shapeArea < 100*100) {
        // If a path is small enough, we might as well try to render and cache the entire thing,
        // no matter how much of it is actually visible.
        return true;
    }

    // The hitRect should already be contained within the shape's bounds, but we still intersect
    // it because it's possible for edges very near pixel boundaries (e.g., 0.999999) to round
    // out inconsistently, depending on the integer translation values and fp32 precision.
    SkIRect hitRect = fCacheEntry->hitRect().makeOffset(fCachedMaskShift.x(),
                                                        fCachedMaskShift.y());
    hitRect.intersect(fShapeConservativeIBounds);

    // Render and cache the entire path mask if we see enough of it to justify rendering all the
    // pixels. Our criterion for "enough" is that we must have seen at least 50% of the path in
    // the past, and in this particular draw we must see at least 10% of it.
    int64_t hitArea = sk_64_mul(hitRect.height(), hitRect.width());
    int64_t drawArea = sk_64_mul(fMaskDevIBounds.height(), fMaskDevIBounds.width());
    return hitArea*2 >= shapeArea && drawArea*10 >= shapeArea;
}

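// Worked example: for a 400x400 path (shapeArea = 160,000), the mask is cached only once the
// accumulated hitRect covers at least 80,000px (50%) and the current draw's visible bounds
// cover at least 16,000px (10%).
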
void GrCCDrawPathsOp::setupResources(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
        GrCCPerFlushResources* resources, DoCopiesToA8Coverage doCopies) {
    SkASSERT(fNumDraws > 0);
    SkASSERT(-1 == fBaseInstance);
    fBaseInstance = resources->nextPathInstanceIdx();

    for (SingleDraw& draw : fDraws) {
        draw.setupResources(pathCache, onFlushRP, resources, doCopies, this);
    }

    if (!fInstanceRanges.empty()) {
        fInstanceRanges.back().fEndInstanceIdx = resources->nextPathInstanceIdx();
    }
}

void GrCCDrawPathsOp::SingleDraw::setupResources(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
        GrCCPerFlushResources* resources, DoCopiesToA8Coverage doCopies, GrCCDrawPathsOp* op) {
    SkPath path;
    fShape.asPath(&path);

    auto fillRule = (fShape.style().strokeRec().isFillStyle())
            ? GrFillRuleForSkPath(path)
            : GrFillRule::kNonzero;

    if (fCacheEntry) {
        // Does the path already exist in a cached atlas texture?
        if (fCacheEntry->cachedAtlas()) {
            SkASSERT(fCacheEntry->cachedAtlas()->getOnFlushProxy());
            if (DoCopiesToA8Coverage::kYes == doCopies && fDoCopyToA8Coverage) {
                resources->upgradeEntryToLiteralCoverageAtlas(
                        pathCache, onFlushRP, fCacheEntry.get(), fillRule);
                SkASSERT(fCacheEntry->cachedAtlas());
                SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage
                                 == fCacheEntry->cachedAtlas()->coverageType());
                SkASSERT(fCacheEntry->cachedAtlas()->getOnFlushProxy());
            }
#if 0
            // Simple color manipulation to visualize cached paths.
            fColor = (GrCCAtlas::CoverageType::kA8_LiteralCoverage
                              == fCacheEntry->cachedAtlas()->coverageType())
                    ? SkPMColor4f{0, 0, .25, .25} : SkPMColor4f{0, .25, 0, .25};
#endif
            auto coverageMode = GrCCPathProcessor::GetCoverageMode(
                    fCacheEntry->cachedAtlas()->coverageType());
            op->recordInstance(coverageMode, fCacheEntry->cachedAtlas()->getOnFlushProxy(),
                               resources->nextPathInstanceIdx());
            resources->appendDrawPathInstance().set(
                    *fCacheEntry, fCachedMaskShift, SkPMColor4f_toFP16(fColor), fillRule);
#ifdef SK_DEBUG
            if (fWasCountedAsRender) {
                // A path mask didn't exist for this path at the beginning of flush, but we have
                // one now. What this means is that we've drawn the same path multiple times this
                // flush. Let the resources know that we reused one for their internal debug
                // counters.
                resources->debugOnly_didReuseRenderedPath();
            }
#endif
            return;
        }
    }

    // Render the raw path into a coverage count atlas. renderShapeInAtlas() gives us two tight
    // bounding boxes: One in device space, as well as a second one rotated an additional 45
    // degrees. The path vertex shader uses these two bounding boxes to generate an octagon that
    // circumscribes the path.
    GrOctoBounds octoBounds;
    SkIRect devIBounds;
    SkIVector devToAtlasOffset;
    if (auto atlas = resources->renderShapeInAtlas(
                fMaskDevIBounds, fMatrix, fShape, fStrokeDevWidth, &octoBounds, &devIBounds,
                &devToAtlasOffset)) {
        auto coverageMode = GrCCPathProcessor::GetCoverageMode(
                resources->renderedPathCoverageType());
        op->recordInstance(coverageMode, atlas->textureProxy(),
                           resources->nextPathInstanceIdx());
        resources->appendDrawPathInstance().set(
                octoBounds, devToAtlasOffset, SkPMColor4f_toFP16(fColor), fillRule);

        if (fDoCachePathMask) {
            SkASSERT(fCacheEntry);
            SkASSERT(!fCacheEntry->cachedAtlas());
            SkASSERT(fShapeConservativeIBounds == fMaskDevIBounds);
            fCacheEntry->setCoverageCountAtlas(
                    onFlushRP, atlas, devToAtlasOffset, octoBounds, devIBounds,
                    fCachedMaskShift);
        }
    }
}

inline void GrCCDrawPathsOp::recordInstance(
        GrCCPathProcessor::CoverageMode coverageMode, GrTextureProxy* atlasProxy,
        int instanceIdx) {
    if (fInstanceRanges.empty()) {
        fInstanceRanges.push_back({coverageMode, atlasProxy, instanceIdx});
    } else if (fInstanceRanges.back().fAtlasProxy != atlasProxy) {
        fInstanceRanges.back().fEndInstanceIdx = instanceIdx;
        fInstanceRanges.push_back({coverageMode, atlasProxy, instanceIdx});
    }
    SkASSERT(fInstanceRanges.back().fCoverageMode == coverageMode);
    SkASSERT(fInstanceRanges.back().fAtlasProxy == atlasProxy);
}

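// Instances are appended in draw order, so consecutive draws that source the same atlas proxy
// collapse into a single InstanceRange. The final range's fEndInstanceIdx is deliberately left
// unset here; setupResources() patches it in after the last draw (see above).
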
void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
    SkASSERT(fOwningPerOpListPaths);

    const GrCCPerFlushResources* resources = fOwningPerOpListPaths->fFlushResources.get();
    if (!resources) {
        return;  // Setup failed.
    }

    GrPipeline::InitArgs initArgs;
    initArgs.fCaps = &flushState->caps();
    initArgs.fDstProxy = flushState->drawOpArgs().fDstProxy;
    initArgs.fOutputSwizzle = flushState->drawOpArgs().fOutputSwizzle;
    auto clip = flushState->detachAppliedClip();
    GrPipeline::FixedDynamicState fixedDynamicState(clip.scissorState().rect());
    GrPipeline pipeline(initArgs, std::move(fProcessors), std::move(clip));

    int baseInstance = fBaseInstance;
    SkASSERT(baseInstance >= 0);  // Make sure setupResources() has been called.

    for (const InstanceRange& range : fInstanceRanges) {
        SkASSERT(range.fEndInstanceIdx > baseInstance);

        const GrTextureProxy* atlas = range.fAtlasProxy;
        SkASSERT(atlas->isInstantiated());

        GrCCPathProcessor pathProc(
                range.fCoverageMode, atlas->peekTexture(), atlas->textureSwizzle(),
                atlas->origin(), fViewMatrixIfUsingLocalCoords);
        GrTextureProxy* atlasProxy = range.fAtlasProxy;
        fixedDynamicState.fPrimitiveProcessorTextures = &atlasProxy;
        pathProc.drawPaths(flushState, pipeline, &fixedDynamicState, *resources, baseInstance,
                           range.fEndInstanceIdx, this->bounds());

        baseInstance = range.fEndInstanceIdx;
    }
}