GrCCFiller.cpp

/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCFiller.h"

#include "include/core/SkPath.h"
#include "include/core/SkPoint.h"
#include "src/core/SkMathPriv.h"
#include "src/core/SkPathPriv.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrGpuCommandBuffer.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrOpFlushState.h"

#include <stdlib.h>

using TriPointInstance = GrCCCoverageProcessor::TriPointInstance;
using QuadPointInstance = GrCCCoverageProcessor::QuadPointInstance;

GrCCFiller::GrCCFiller(Algorithm algorithm, int numPaths, int numSkPoints, int numSkVerbs,
                       int numConicWeights)
        : fAlgorithm(algorithm)
        , fGeometry(numSkPoints, numSkVerbs, numConicWeights)
        , fPathInfos(numPaths)
        , fScissorSubBatches(numPaths)
        , fTotalPrimitiveCounts{PrimitiveTallies(), PrimitiveTallies()} {
    // Batches decide what to draw by looking at where the previous one ended. Define initial
    // batches that "end" at the beginning of the data. These will not be drawn, but will only be
    // read by the first actual batch.
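    //
    // Illustrative consequence (hypothetical tallies): if the first real batch later ends with,
    // say, 10 non-scissored triangles, drawPrimitives() computes that batch's triangle instance
    // count as 10 - 0 by subtracting against these initial zero tallies.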
    fScissorSubBatches.push_back() = {PrimitiveTallies(), SkIRect::MakeEmpty()};
    fBatches.push_back() = {PrimitiveTallies(), fScissorSubBatches.count(), PrimitiveTallies()};
}
void GrCCFiller::parseDeviceSpaceFill(const SkPath& path, const SkPoint* deviceSpacePts,
                                      GrScissorTest scissorTest, const SkIRect& clippedDevIBounds,
                                      const SkIVector& devToAtlasOffset) {
    SkASSERT(!fInstanceBuffer);  // Can't call after prepareToDraw().
    SkASSERT(!path.isEmpty());

    int currPathPointsIdx = fGeometry.points().count();
    int currPathVerbsIdx = fGeometry.verbs().count();
    PrimitiveTallies currPathPrimitiveCounts = PrimitiveTallies();

    fGeometry.beginPath();

    const float* conicWeights = SkPathPriv::ConicWeightData(path);
    int ptsIdx = 0;
    int conicWeightsIdx = 0;
    bool insideContour = false;

    for (SkPath::Verb verb : SkPathPriv::Verbs(path)) {
        switch (verb) {
            case SkPath::kMove_Verb:
                if (insideContour) {
                    currPathPrimitiveCounts += fGeometry.endContour();
                }
                fGeometry.beginContour(deviceSpacePts[ptsIdx]);
                ++ptsIdx;
                insideContour = true;
                continue;
            case SkPath::kClose_Verb:
                if (insideContour) {
                    currPathPrimitiveCounts += fGeometry.endContour();
                }
                insideContour = false;
                continue;
            case SkPath::kLine_Verb:
                fGeometry.lineTo(&deviceSpacePts[ptsIdx - 1]);
                ++ptsIdx;
                continue;
            case SkPath::kQuad_Verb:
                fGeometry.quadraticTo(&deviceSpacePts[ptsIdx - 1]);
                ptsIdx += 2;
                continue;
            case SkPath::kCubic_Verb:
                fGeometry.cubicTo(&deviceSpacePts[ptsIdx - 1]);
                ptsIdx += 3;
                continue;
            case SkPath::kConic_Verb:
                fGeometry.conicTo(&deviceSpacePts[ptsIdx - 1], conicWeights[conicWeightsIdx]);
                ptsIdx += 2;
                ++conicWeightsIdx;
                continue;
            default:
                SK_ABORT("Unexpected path verb.");
        }
    }
    SkASSERT(ptsIdx == path.countPoints());
    SkASSERT(conicWeightsIdx == SkPathPriv::ConicWeightCnt(path));

    if (insideContour) {
        currPathPrimitiveCounts += fGeometry.endContour();
    }

    fPathInfos.emplace_back(scissorTest, devToAtlasOffset);

    // Tessellate fans from very large and/or simple paths, in order to reduce overdraw.
    int numVerbs = fGeometry.verbs().count() - currPathVerbsIdx - 1;
    int64_t tessellationWork = (int64_t)numVerbs * (32 - SkCLZ(numVerbs));  // N log N.
    int64_t fanningWork = (int64_t)clippedDevIBounds.height() * clippedDevIBounds.width();
    if (tessellationWork * (50*50) + (100*100) < fanningWork) {  // Don't tessellate under 100x100.
        fPathInfos.back().tessellateFan(
                fAlgorithm, path, fGeometry, currPathVerbsIdx, currPathPointsIdx, clippedDevIBounds,
                &currPathPrimitiveCounts);
    }
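
    // Worked example of the heuristic above, with assumed inputs: a path that contributes 16
    // verbs gives tessellationWork = 16 * (32 - SkCLZ(16)) = 16 * 5 = 80, so the test compares
    // 80*2500 + 10000 = 210,000 against fanningWork (the clipped device-space area in pixels).
    // A 256x256 clip (65,536 px) keeps the fan for emit_recursive_fan() at flush time, while a
    // 1000x1000 clip (1,000,000 px) triggers tessellateFan().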
    fTotalPrimitiveCounts[(int)scissorTest] += currPathPrimitiveCounts;

    if (GrScissorTest::kEnabled == scissorTest) {
        fScissorSubBatches.push_back() = {fTotalPrimitiveCounts[(int)GrScissorTest::kEnabled],
                                          clippedDevIBounds.makeOffset(devToAtlasOffset.fX,
                                                                       devToAtlasOffset.fY)};
    }
}

void GrCCFiller::PathInfo::tessellateFan(
        Algorithm algorithm, const SkPath& originalPath, const GrCCFillGeometry& geometry,
        int verbsIdx, int ptsIdx, const SkIRect& clippedDevIBounds,
        PrimitiveTallies* newTriangleCounts) {
    using Verb = GrCCFillGeometry::Verb;
    SkASSERT(-1 == fFanTessellationCount);
    SkASSERT(!fFanTessellation);

    const SkTArray<Verb, true>& verbs = geometry.verbs();
    const SkTArray<SkPoint, true>& pts = geometry.points();

    newTriangleCounts->fTriangles =
            newTriangleCounts->fWeightedTriangles = 0;

    // Build an SkPath of the Redbook fan.
    SkPath fan;
    if (Algorithm::kCoverageCount == algorithm) {
        // We use "winding" fill type right now because we are producing a coverage count, and must
        // fill in every region that has non-zero wind. The path processor will convert coverage
        // count to the appropriate fill type later.
        fan.setFillType(SkPath::kWinding_FillType);
    } else {
        // When counting winding numbers in the stencil buffer, it works to just tessellate the
        // Redbook fan with the same fill type as the path.
        fan.setFillType(originalPath.getFillType());
    }
    SkASSERT(Verb::kBeginPath == verbs[verbsIdx]);
    for (int i = verbsIdx + 1; i < verbs.count(); ++i) {
        switch (verbs[i]) {
            case Verb::kBeginPath:
                SK_ABORT("Invalid GrCCFillGeometry");
                continue;

            case Verb::kBeginContour:
                fan.moveTo(pts[ptsIdx++]);
                continue;

            case Verb::kLineTo:
                fan.lineTo(pts[ptsIdx++]);
                continue;

            case Verb::kMonotonicQuadraticTo:
            case Verb::kMonotonicConicTo:
                fan.lineTo(pts[ptsIdx + 1]);
                ptsIdx += 2;
                continue;

            case Verb::kMonotonicCubicTo:
                fan.lineTo(pts[ptsIdx + 2]);
                ptsIdx += 3;
                continue;

            case Verb::kEndClosedContour:
            case Verb::kEndOpenContour:
                fan.close();
                continue;
        }
    }

    GrTessellator::WindingVertex* vertices = nullptr;
    fFanTessellationCount =
            GrTessellator::PathToVertices(fan, std::numeric_limits<float>::infinity(),
                                          SkRect::Make(clippedDevIBounds), &vertices);
    if (fFanTessellationCount <= 0) {
        SkASSERT(0 == fFanTessellationCount);
        SkASSERT(nullptr == vertices);
        return;
    }

    SkASSERT(0 == fFanTessellationCount % 3);
    for (int i = 0; i < fFanTessellationCount; i += 3) {
        int tessWinding = vertices[i].fWinding;
        SkASSERT(tessWinding == vertices[i + 1].fWinding);
        SkASSERT(tessWinding == vertices[i + 2].fWinding);

        // Ensure this triangle's points actually wind in the same direction as tessWinding.
        // CCPR shaders use the sign of wind to determine which direction to bloat, so even for
        // "wound" triangles the winding sign and point ordering need to agree.
        float ax = vertices[i].fPos.fX - vertices[i + 1].fPos.fX;
        float ay = vertices[i].fPos.fY - vertices[i + 1].fPos.fY;
        float bx = vertices[i].fPos.fX - vertices[i + 2].fPos.fX;
        float by = vertices[i].fPos.fY - vertices[i + 2].fPos.fY;
        float wind = ax*by - ay*bx;
        if ((wind > 0) != (-tessWinding > 0)) {  // Tessellator has opposite winding sense.
            std::swap(vertices[i + 1].fPos, vertices[i + 2].fPos);
        }
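
        // Illustrative trace with hypothetical coordinates: for vertices (0,0), (10,0), (0,10),
        // ax = -10, ay = 0, bx = 0, by = -10, so wind = (-10)(-10) - (0)(0) = 100 > 0. If the
        // tessellator reported tessWinding = +1, then (-tessWinding > 0) is false, the two signs
        // disagree, and the second and third points are swapped above.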
        int weight = abs(tessWinding);
        SkASSERT(SkPath::kEvenOdd_FillType != fan.getFillType() || weight == 1);
        if (weight > 1 && Algorithm::kCoverageCount == algorithm) {
            ++newTriangleCounts->fWeightedTriangles;
        } else {
            newTriangleCounts->fTriangles += weight;
        }
    }

    fFanTessellation.reset(vertices);
}

GrCCFiller::BatchID GrCCFiller::closeCurrentBatch() {
    SkASSERT(!fInstanceBuffer);
    SkASSERT(!fBatches.empty());

    const auto& lastBatch = fBatches.back();
    int maxMeshes = 1 + fScissorSubBatches.count() - lastBatch.fEndScissorSubBatchIdx;
    fMaxMeshesPerDraw = SkTMax(fMaxMeshesPerDraw, maxMeshes);

    const auto& lastScissorSubBatch = fScissorSubBatches[lastBatch.fEndScissorSubBatchIdx - 1];
    PrimitiveTallies batchTotalCounts = fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled] -
                                        lastBatch.fEndNonScissorIndices;
    batchTotalCounts += fTotalPrimitiveCounts[(int)GrScissorTest::kEnabled] -
                        lastScissorSubBatch.fEndPrimitiveIndices;

    // This will invalidate lastBatch.
    fBatches.push_back() = {
            fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled],
            fScissorSubBatches.count(),
            batchTotalCounts
    };
    return fBatches.count() - 1;
}

// Emits a contour's triangle fan.
//
// Classic Redbook fanning would be the triangles: [0 1 2], [0 2 3], ..., [0 n-2 n-1].
//
// This function emits the triangle: [0 n/3 n*2/3], and then recurses on all three sides. The
// advantage to this approach is that for a convex-ish contour, it generates larger triangles.
// Classic fanning tends to generate long, skinny triangles, which are expensive to draw since they
// have a longer perimeter to rasterize and antialias.
//
// The indices array indexes the fan's points (think: glDrawElements), and must have at least
// log3(indexCount) extra elements past the end for this method to use as scratch space.
//
// Returns the next triangle instance after the final one emitted.
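//
// Illustrative trace for a 6-point fan (point indices 0..5, with scratch slots reserved past the
// end): the first call emits [0 2 4], then recurses on each side. The recursions emit [0 1 2] and
// [2 3 4], and, after the fan's start index is temporarily copied into the slot just past the end,
// [4 5 0]. That is indexCount - 2 = 4 triangles total, matching the count the caller adds to
// fTriangles.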
static TriPointInstance* emit_recursive_fan(
        const SkTArray<SkPoint, true>& pts, SkTArray<int32_t, true>& indices, int firstIndex,
        int indexCount, const Sk2f& devToAtlasOffset, TriPointInstance::Ordering ordering,
        TriPointInstance out[]) {
    if (indexCount < 3) {
        return out;
    }

    int32_t oneThirdCount = indexCount / 3;
    int32_t twoThirdsCount = (2 * indexCount) / 3;
    out++->set(pts[indices[firstIndex]], pts[indices[firstIndex + oneThirdCount]],
               pts[indices[firstIndex + twoThirdsCount]], devToAtlasOffset, ordering);

    out = emit_recursive_fan(
            pts, indices, firstIndex, oneThirdCount + 1, devToAtlasOffset, ordering, out);
    out = emit_recursive_fan(
            pts, indices, firstIndex + oneThirdCount, twoThirdsCount - oneThirdCount + 1,
            devToAtlasOffset, ordering, out);

    int endIndex = firstIndex + indexCount;
    int32_t oldValue = indices[endIndex];
    indices[endIndex] = indices[firstIndex];
    out = emit_recursive_fan(
            pts, indices, firstIndex + twoThirdsCount, indexCount - twoThirdsCount + 1,
            devToAtlasOffset, ordering, out);
    indices[endIndex] = oldValue;

    return out;
}

void GrCCFiller::emitTessellatedFan(
        const GrTessellator::WindingVertex* vertices, int numVertices, const Sk2f& devToAtlasOffset,
        TriPointInstance::Ordering ordering, TriPointInstance* triPointInstanceData,
        QuadPointInstance* quadPointInstanceData, GrCCFillGeometry::PrimitiveTallies* indices) {
    for (int i = 0; i < numVertices; i += 3) {
        int weight = abs(vertices[i].fWinding);
        SkASSERT(weight >= 1);
        if (weight > 1 && Algorithm::kStencilWindingCount != fAlgorithm) {
            quadPointInstanceData[indices->fWeightedTriangles++].setW(
                    vertices[i].fPos, vertices[i+1].fPos, vertices[i + 2].fPos, devToAtlasOffset,
                    static_cast<float>(abs(vertices[i].fWinding)));
        } else for (int j = 0; j < weight; ++j) {
            // Unfortunately, there is no way to increment stencil values by an amount larger
            // than 1. Instead we draw the triangle 'weight' times.
            triPointInstanceData[indices->fTriangles++].set(
                    vertices[i].fPos, vertices[i + 1].fPos, vertices[i + 2].fPos, devToAtlasOffset,
                    ordering);
        }
    }
}

bool GrCCFiller::prepareToDraw(GrOnFlushResourceProvider* onFlushRP) {
    using Verb = GrCCFillGeometry::Verb;
    SkASSERT(!fInstanceBuffer);
    SkASSERT(fBatches.back().fEndNonScissorIndices ==  // Call closeCurrentBatch().
             fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled]);
    SkASSERT(fBatches.back().fEndScissorSubBatchIdx == fScissorSubBatches.count());

    auto triangleOrdering = (Algorithm::kCoverageCount == fAlgorithm)
            ? TriPointInstance::Ordering::kXYTransposed
            : TriPointInstance::Ordering::kXYInterleaved;

    // Here we build a single instance buffer to share with every internal batch.
    //
    // CCPR processes 5 different types of primitives: triangles, weighted triangles, quadratics,
    // cubics, and conics. Each primitive type is further divided into instances that require a
    // scissor and those that don't. This leaves us with 5*2 = 10 independent instance arrays to
    // build for the GPU.
    //
    // Rather than place each instance array in its own GPU buffer, we allocate a single
    // megabuffer and lay them all out side-by-side. We can offset the "baseInstance" parameter in
    // our draw calls to direct the GPU to the applicable elements within a given array.
    //
    // We already know how big to make each of these arrays from fTotalPrimitiveCounts, so layout
    // is straightforward. Start with triangles and quadratics. They both view the instance buffer
    // as an array of TriPointInstance[], so we can begin at zero and lay them out one after the
    // other.
    fBaseInstances[0].fTriangles = 0;
    fBaseInstances[1].fTriangles = fBaseInstances[0].fTriangles +
                                   fTotalPrimitiveCounts[0].fTriangles;
    fBaseInstances[0].fQuadratics = fBaseInstances[1].fTriangles +
                                    fTotalPrimitiveCounts[1].fTriangles;
    fBaseInstances[1].fQuadratics = fBaseInstances[0].fQuadratics +
                                    fTotalPrimitiveCounts[0].fQuadratics;
    int triEndIdx = fBaseInstances[1].fQuadratics + fTotalPrimitiveCounts[1].fQuadratics;
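
    // Layout example with assumed tallies: if the non-scissored counts are 100 triangles and
    // 40 quadratics, and the scissored counts are 20 triangles and 11 quadratics, the
    // TriPointInstance view of the buffer is [0,100) non-scissored triangles, [100,120) scissored
    // triangles, [120,160) non-scissored quadratics, and [160,171) scissored quadratics, giving
    // triEndIdx = 171.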
    // Wound triangles and cubics both view the same instance buffer as an array of
    // QuadPointInstance[]. So, reinterpreting the instance data as QuadPointInstance[], we start
    // them on the first index that will not overwrite previous TriPointInstance data.
    int quadBaseIdx =
            GrSizeDivRoundUp(triEndIdx * sizeof(TriPointInstance), sizeof(QuadPointInstance));
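
    // Continuing the example above with purely hypothetical struct sizes: if triEndIdx == 171,
    // sizeof(TriPointInstance) were 24 bytes, and sizeof(QuadPointInstance) were 40 bytes, the
    // TriPointInstance data would end at byte 171*24 = 4104, so GrSizeDivRoundUp starts the
    // QuadPointInstance arrays at index ceil(4104/40) = 103 (byte 4120), just past that data.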
    fBaseInstances[0].fWeightedTriangles = quadBaseIdx;
    fBaseInstances[1].fWeightedTriangles = fBaseInstances[0].fWeightedTriangles +
                                           fTotalPrimitiveCounts[0].fWeightedTriangles;
    fBaseInstances[0].fCubics = fBaseInstances[1].fWeightedTriangles +
                                fTotalPrimitiveCounts[1].fWeightedTriangles;
    fBaseInstances[1].fCubics = fBaseInstances[0].fCubics + fTotalPrimitiveCounts[0].fCubics;
    fBaseInstances[0].fConics = fBaseInstances[1].fCubics + fTotalPrimitiveCounts[1].fCubics;
    fBaseInstances[1].fConics = fBaseInstances[0].fConics + fTotalPrimitiveCounts[0].fConics;
    int quadEndIdx = fBaseInstances[1].fConics + fTotalPrimitiveCounts[1].fConics;

    fInstanceBuffer =
            onFlushRP->makeBuffer(GrGpuBufferType::kVertex, quadEndIdx * sizeof(QuadPointInstance));
    if (!fInstanceBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR fill instance buffer.\n");
        return false;
    }

    TriPointInstance* triPointInstanceData = static_cast<TriPointInstance*>(fInstanceBuffer->map());
    QuadPointInstance* quadPointInstanceData =
            reinterpret_cast<QuadPointInstance*>(triPointInstanceData);
    SkASSERT(quadPointInstanceData);

    PathInfo* nextPathInfo = fPathInfos.begin();
    Sk2f devToAtlasOffset;
    PrimitiveTallies instanceIndices[2] = {fBaseInstances[0], fBaseInstances[1]};
    PrimitiveTallies* currIndices = nullptr;
    SkSTArray<256, int32_t, true> currFan;
    bool currFanIsTessellated = false;

    const SkTArray<SkPoint, true>& pts = fGeometry.points();
    int ptsIdx = -1;
    int nextConicWeightIdx = 0;

    // Expand the ccpr verbs into GPU instance buffers.
    for (Verb verb : fGeometry.verbs()) {
        switch (verb) {
            case Verb::kBeginPath:
                SkASSERT(currFan.empty());
                currIndices = &instanceIndices[(int)nextPathInfo->scissorTest()];
                devToAtlasOffset = Sk2f(static_cast<float>(nextPathInfo->devToAtlasOffset().fX),
                                        static_cast<float>(nextPathInfo->devToAtlasOffset().fY));
                currFanIsTessellated = nextPathInfo->hasFanTessellation();
                if (currFanIsTessellated) {
                    this->emitTessellatedFan(
                            nextPathInfo->fanTessellation(), nextPathInfo->fanTessellationCount(),
                            devToAtlasOffset, triangleOrdering, triPointInstanceData,
                            quadPointInstanceData, currIndices);
                }
                ++nextPathInfo;
                continue;

            case Verb::kBeginContour:
                SkASSERT(currFan.empty());
                ++ptsIdx;
                if (!currFanIsTessellated) {
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kLineTo:
                ++ptsIdx;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kMonotonicQuadraticTo:
                triPointInstanceData[currIndices->fQuadratics++].set(
                        &pts[ptsIdx], devToAtlasOffset, TriPointInstance::Ordering::kXYTransposed);
                ptsIdx += 2;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kMonotonicCubicTo:
                quadPointInstanceData[currIndices->fCubics++].set(
                        &pts[ptsIdx], devToAtlasOffset[0], devToAtlasOffset[1]);
                ptsIdx += 3;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kMonotonicConicTo:
                quadPointInstanceData[currIndices->fConics++].setW(
                        &pts[ptsIdx], devToAtlasOffset,
                        fGeometry.getConicWeight(nextConicWeightIdx));
                ptsIdx += 2;
                ++nextConicWeightIdx;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kEndClosedContour:  // endPt == startPt.
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.pop_back();
                }
                // fallthru.
            case Verb::kEndOpenContour:  // endPt != startPt.
                SkASSERT(!currFanIsTessellated || currFan.empty());
                if (!currFanIsTessellated && currFan.count() >= 3) {
                    int fanSize = currFan.count();
                    // Reserve space for emit_recursive_fan. Technically this can grow to
                    // fanSize + log3(fanSize), but we approximate with log2.
                    currFan.push_back_n(SkNextLog2(fanSize));
                    SkDEBUGCODE(TriPointInstance* end =) emit_recursive_fan(
                            pts, currFan, 0, fanSize, devToAtlasOffset, triangleOrdering,
                            triPointInstanceData + currIndices->fTriangles);
                    currIndices->fTriangles += fanSize - 2;
                    SkASSERT(triPointInstanceData + currIndices->fTriangles == end);
                }
                currFan.reset();
                continue;
        }
    }

    fInstanceBuffer->unmap();

    SkASSERT(nextPathInfo == fPathInfos.end());
    SkASSERT(ptsIdx == pts.count() - 1);
    SkASSERT(instanceIndices[0].fTriangles == fBaseInstances[1].fTriangles);
    SkASSERT(instanceIndices[1].fTriangles == fBaseInstances[0].fQuadratics);
    SkASSERT(instanceIndices[0].fQuadratics == fBaseInstances[1].fQuadratics);
    SkASSERT(instanceIndices[1].fQuadratics == triEndIdx);
    SkASSERT(instanceIndices[0].fWeightedTriangles == fBaseInstances[1].fWeightedTriangles);
    SkASSERT(instanceIndices[1].fWeightedTriangles == fBaseInstances[0].fCubics);
    SkASSERT(instanceIndices[0].fCubics == fBaseInstances[1].fCubics);
    SkASSERT(instanceIndices[1].fCubics == fBaseInstances[0].fConics);
    SkASSERT(instanceIndices[0].fConics == fBaseInstances[1].fConics);
    SkASSERT(instanceIndices[1].fConics == quadEndIdx);

    fMeshesScratchBuffer.reserve(fMaxMeshesPerDraw);
    fScissorRectScratchBuffer.reserve(fMaxMeshesPerDraw);

    return true;
}

void GrCCFiller::drawFills(
        GrOpFlushState* flushState, GrCCCoverageProcessor* proc, const GrPipeline& pipeline,
        BatchID batchID, const SkIRect& drawBounds) const {
    using PrimitiveType = GrCCCoverageProcessor::PrimitiveType;

    SkASSERT(fInstanceBuffer);

    GrResourceProvider* rp = flushState->resourceProvider();
    const PrimitiveTallies& batchTotalCounts = fBatches[batchID].fTotalPrimitiveCounts;

    if (batchTotalCounts.fTriangles) {
        proc->reset(PrimitiveType::kTriangles, rp);
        this->drawPrimitives(
                flushState, *proc, pipeline, batchID, &PrimitiveTallies::fTriangles, drawBounds);
    }

    if (batchTotalCounts.fWeightedTriangles) {
        SkASSERT(Algorithm::kStencilWindingCount != fAlgorithm);
        proc->reset(PrimitiveType::kWeightedTriangles, rp);
        this->drawPrimitives(
                flushState, *proc, pipeline, batchID, &PrimitiveTallies::fWeightedTriangles,
                drawBounds);
    }

    if (batchTotalCounts.fQuadratics) {
        proc->reset(PrimitiveType::kQuadratics, rp);
        this->drawPrimitives(
                flushState, *proc, pipeline, batchID, &PrimitiveTallies::fQuadratics, drawBounds);
    }

    if (batchTotalCounts.fCubics) {
        proc->reset(PrimitiveType::kCubics, rp);
        this->drawPrimitives(
                flushState, *proc, pipeline, batchID, &PrimitiveTallies::fCubics, drawBounds);
    }

    if (batchTotalCounts.fConics) {
        proc->reset(PrimitiveType::kConics, rp);
        this->drawPrimitives(
                flushState, *proc, pipeline, batchID, &PrimitiveTallies::fConics, drawBounds);
    }
}

void GrCCFiller::drawPrimitives(
        GrOpFlushState* flushState, const GrCCCoverageProcessor& proc, const GrPipeline& pipeline,
        BatchID batchID, int PrimitiveTallies::*instanceType, const SkIRect& drawBounds) const {
    SkASSERT(pipeline.isScissorEnabled());

    // Don't call reset(), as that also resets the reserve count.
    fMeshesScratchBuffer.pop_back_n(fMeshesScratchBuffer.count());
    fScissorRectScratchBuffer.pop_back_n(fScissorRectScratchBuffer.count());

    SkASSERT(batchID > 0);
    SkASSERT(batchID < fBatches.count());
    const Batch& previousBatch = fBatches[batchID - 1];
    const Batch& batch = fBatches[batchID];
    SkDEBUGCODE(int totalInstanceCount = 0);

    if (int instanceCount = batch.fEndNonScissorIndices.*instanceType -
                            previousBatch.fEndNonScissorIndices.*instanceType) {
        SkASSERT(instanceCount > 0);
        int baseInstance = fBaseInstances[(int)GrScissorTest::kDisabled].*instanceType +
                           previousBatch.fEndNonScissorIndices.*instanceType;
        proc.appendMesh(fInstanceBuffer, instanceCount, baseInstance, &fMeshesScratchBuffer);
        fScissorRectScratchBuffer.push_back().setXYWH(0, 0, drawBounds.width(),
                                                      drawBounds.height());
        SkDEBUGCODE(totalInstanceCount += instanceCount);
    }

    SkASSERT(previousBatch.fEndScissorSubBatchIdx > 0);
    SkASSERT(batch.fEndScissorSubBatchIdx <= fScissorSubBatches.count());
    int baseScissorInstance = fBaseInstances[(int)GrScissorTest::kEnabled].*instanceType;
    for (int i = previousBatch.fEndScissorSubBatchIdx; i < batch.fEndScissorSubBatchIdx; ++i) {
        const ScissorSubBatch& previousSubBatch = fScissorSubBatches[i - 1];
        const ScissorSubBatch& scissorSubBatch = fScissorSubBatches[i];
        int startIndex = previousSubBatch.fEndPrimitiveIndices.*instanceType;
        int instanceCount = scissorSubBatch.fEndPrimitiveIndices.*instanceType - startIndex;
        if (!instanceCount) {
            continue;
        }
        SkASSERT(instanceCount > 0);
        proc.appendMesh(fInstanceBuffer, instanceCount, baseScissorInstance + startIndex,
                        &fMeshesScratchBuffer);
        fScissorRectScratchBuffer.push_back() = scissorSubBatch.fScissor;
        SkDEBUGCODE(totalInstanceCount += instanceCount);
    }

    SkASSERT(fMeshesScratchBuffer.count() == fScissorRectScratchBuffer.count());
    SkASSERT(fMeshesScratchBuffer.count() <= fMaxMeshesPerDraw);
    SkASSERT(totalInstanceCount == batch.fTotalPrimitiveCounts.*instanceType);

    if (!fMeshesScratchBuffer.empty()) {
        proc.draw(flushState, pipeline, fScissorRectScratchBuffer.begin(),
                  fMeshesScratchBuffer.begin(), fMeshesScratchBuffer.count(),
                  SkRect::Make(drawBounds));
    }
}