blob: 8bb507bd01ea5a0cc20e6a7d8e9dfa056cf8ada7 [file] [log] [blame]
/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#include "GrCCPathParser.h"
9
10#include "GrCaps.h"
11#include "GrGpuCommandBuffer.h"
12#include "GrOnFlushResourceProvider.h"
13#include "GrOpFlushState.h"
14#include "SkMathPriv.h"
15#include "SkPath.h"
16#include "SkPathPriv.h"
17#include "SkPoint.h"
18#include "ccpr/GrCCGeometry.h"
Chris Dalton84403d72018-02-13 21:46:17 -050019#include <stdlib.h>
Chris Dalton9ca27842018-01-18 12:24:50 -070020
// Shorthand for the two per-primitive instance layouts emitted by the coverage processor:
// TriPointInstance for triangles/quadratics, QuadPointInstance for cubics and "wound" triangles.
using TriPointInstance = GrCCCoverageProcessor::TriPointInstance;
using QuadPointInstance = GrCCCoverageProcessor::QuadPointInstance;
Chris Dalton9ca27842018-01-18 12:24:50 -070023
// Pre-reserves all storage up front; maxTotalPaths/maxPathPoints bound how much geometry will be
// parsed before finalize().
GrCCPathParser::GrCCPathParser(int maxTotalPaths, int maxPathPoints, int numSkPoints,
                               int numSkVerbs)
        : fLocalDevPtsBuffer(maxPathPoints + 1)  // Overallocate by one point to accommodate for
                                                 // overflow with Sk4f. (See parsePath.)
        , fGeometry(numSkPoints, numSkVerbs)
        , fPathsInfo(maxTotalPaths)
        , fScissorSubBatches(maxTotalPaths)
        , fTotalPrimitiveCounts{PrimitiveTallies(), PrimitiveTallies()} {
    // Batches decide what to draw by looking where the previous one ended. Define initial batches
    // that "end" at the beginning of the data. These will not be drawn, but will only be read by
    // the first actual batch.
    fScissorSubBatches.push_back() = {PrimitiveTallies(), SkIRect::MakeEmpty()};
    fCoverageCountBatches.push_back() = {PrimitiveTallies(), fScissorSubBatches.count(),
                                         PrimitiveTallies()};
}
39
// Maps 'path' through 'm' into device space, parses it, and returns the device-space bounding box
// plus the bounding box in a 45-degree-rotated space (used for the octagonal bloat in CCPR).
// Both bounding boxes are accumulated in a single SIMD pass over the points.
void GrCCPathParser::parsePath(const SkMatrix& m, const SkPath& path, SkRect* devBounds,
                               SkRect* devBounds45) {
    const SkPoint* pts = SkPathPriv::PointData(path);
    int numPts = path.countPoints();
    SkASSERT(numPts + 1 <= fLocalDevPtsBuffer.count());

    if (!numPts) {
        // Empty path: empty bounds, and let the point-less overload record an empty path entry.
        devBounds->setEmpty();
        devBounds45->setEmpty();
        this->parsePath(path, nullptr);
        return;
    }

    // m45 transforms path points into "45 degree" device space. A bounding box in this space gives
    // the circumscribing octagon's diagonals. We could use SK_ScalarRoot2Over2, but an orthonormal
    // transform is not necessary as long as the shader uses the correct inverse.
    SkMatrix m45;
    m45.setSinCos(1, 1);
    m45.preConcat(m);

    // X,Y,T are two parallel view matrices that accumulate two bounding boxes as they map points:
    // device-space bounds and "45 degree" device-space bounds (| 1 -1 | * devCoords).
    //                                                          | 1  1 |
    Sk4f X = Sk4f(m.getScaleX(), m.getSkewY(), m45.getScaleX(), m45.getSkewY());
    Sk4f Y = Sk4f(m.getSkewX(), m.getScaleY(), m45.getSkewX(), m45.getScaleY());
    Sk4f T = Sk4f(m.getTranslateX(), m.getTranslateY(), m45.getTranslateX(), m45.getTranslateY());

    // Map the path's points to device space and accumulate bounding boxes.
    Sk4f devPt = SkNx_fma(Y, Sk4f(pts[0].y()), T);
    devPt = SkNx_fma(X, Sk4f(pts[0].x()), devPt);
    Sk4f topLeft = devPt;
    Sk4f bottomRight = devPt;

    // Store all 4 values [dev.x, dev.y, dev45.x, dev45.y]. We are only interested in the first two,
    // and will overwrite [dev45.x, dev45.y] with the next point. This is why the dst buffer must
    // be at least one larger than the number of points.
    devPt.store(&fLocalDevPtsBuffer[0]);

    for (int i = 1; i < numPts; ++i) {
        devPt = SkNx_fma(Y, Sk4f(pts[i].y()), T);
        devPt = SkNx_fma(X, Sk4f(pts[i].x()), devPt);
        topLeft = Sk4f::Min(topLeft, devPt);
        bottomRight = Sk4f::Max(bottomRight, devPt);
        devPt.store(&fLocalDevPtsBuffer[i]);
    }

    // Unpack the two accumulated boxes: lane pair [0] is device space, lane pair [1] is 45-degree
    // space.
    SkPoint topLeftPts[2], bottomRightPts[2];
    topLeft.store(topLeftPts);
    bottomRight.store(bottomRightPts);
    devBounds->setLTRB(topLeftPts[0].x(), topLeftPts[0].y(), bottomRightPts[0].x(),
                       bottomRightPts[0].y());
    devBounds45->setLTRB(topLeftPts[1].x(), topLeftPts[1].y(), bottomRightPts[1].x(),
                         bottomRightPts[1].y());

    this->parsePath(path, fLocalDevPtsBuffer.get());
}
96
// Parses a path whose points are already in device space (no transform or bounds accumulation).
void GrCCPathParser::parseDeviceSpacePath(const SkPath& deviceSpacePath) {
    this->parsePath(deviceSpacePath, SkPathPriv::PointData(deviceSpacePath));
}
100
// Walks the path's verbs and feeds pre-mapped device-space points into fGeometry. The caller must
// follow up with saveParsedPath() or discardParsedPath(). Conics are not supported; the path is
// expected to have been converted beforehand.
void GrCCPathParser::parsePath(const SkPath& path, const SkPoint* deviceSpacePts) {
    SkASSERT(!fInstanceBuffer); // Can't call after finalize().
    SkASSERT(!fParsingPath); // Call saveParsedPath() or discardParsedPath() for the last one first.
    SkDEBUGCODE(fParsingPath = true);
    SkASSERT(path.isEmpty() || deviceSpacePts);

    // Remember where this path begins so discardParsedPath() can roll back.
    fCurrPathPointsIdx = fGeometry.points().count();
    fCurrPathVerbsIdx = fGeometry.verbs().count();
    fCurrPathPrimitiveCounts = PrimitiveTallies();

    fGeometry.beginPath();

    if (path.isEmpty()) {
        return;
    }

    // ptsIdx tracks our position in deviceSpacePts; each verb consumes the same number of points
    // as the corresponding SkPath verb.
    int ptsIdx = 0;
    bool insideContour = false;

    for (SkPath::Verb verb : SkPathPriv::Verbs(path)) {
        switch (verb) {
            case SkPath::kMove_Verb:
                this->endContourIfNeeded(insideContour);
                fGeometry.beginContour(deviceSpacePts[ptsIdx]);
                ++ptsIdx;
                insideContour = true;
                continue;
            case SkPath::kClose_Verb:
                this->endContourIfNeeded(insideContour);
                insideContour = false;
                continue;
            case SkPath::kLine_Verb:
                fGeometry.lineTo(deviceSpacePts[ptsIdx]);
                ++ptsIdx;
                continue;
            case SkPath::kQuad_Verb:
                fGeometry.quadraticTo(deviceSpacePts[ptsIdx], deviceSpacePts[ptsIdx + 1]);
                ptsIdx += 2;
                continue;
            case SkPath::kCubic_Verb:
                fGeometry.cubicTo(deviceSpacePts[ptsIdx], deviceSpacePts[ptsIdx + 1],
                                  deviceSpacePts[ptsIdx + 2]);
                ptsIdx += 3;
                continue;
            case SkPath::kConic_Verb:
                SK_ABORT("Conics are not supported.");
            default:
                SK_ABORT("Unexpected path verb.");
        }
    }

    // An unclosed trailing contour still contributes primitives.
    this->endContourIfNeeded(insideContour);
}
154
155void GrCCPathParser::endContourIfNeeded(bool insideContour) {
156 if (insideContour) {
157 fCurrPathPrimitiveCounts += fGeometry.endContour();
158 }
159}
160
// Commits the most recently parsed path. Records its scissor mode and atlas offset, optionally
// pre-tessellates its fan (for large/simple paths where Redbook fanning would cause heavy
// overdraw), and accumulates its primitive counts into the running batch totals.
void GrCCPathParser::saveParsedPath(ScissorMode scissorMode, const SkIRect& clippedDevIBounds,
                                    int16_t atlasOffsetX, int16_t atlasOffsetY) {
    SkASSERT(fParsingPath);

    fPathsInfo.emplace_back(scissorMode, atlasOffsetX, atlasOffsetY);

    // Tessellate fans from very large and/or simple paths, in order to reduce overdraw.
    // Heuristic: compare the N log N cost of tessellating against the pixel area the fan would
    // rasterize.
    int numVerbs = fGeometry.verbs().count() - fCurrPathVerbsIdx - 1;
    int64_t tessellationWork = (int64_t)numVerbs * (32 - SkCLZ(numVerbs)); // N log N.
    int64_t fanningWork = (int64_t)clippedDevIBounds.height() * clippedDevIBounds.width();
    if (tessellationWork * (50*50) + (100*100) < fanningWork) { // Don't tessellate under 100x100.
        // The tessellation replaces the fan triangles we tallied during parsing; recount below.
        fCurrPathPrimitiveCounts.fTriangles =
                fCurrPathPrimitiveCounts.fWoundTriangles = 0;

        const SkTArray<GrCCGeometry::Verb, true>& verbs = fGeometry.verbs();
        const SkTArray<SkPoint, true>& pts = fGeometry.points();
        int ptsIdx = fCurrPathPointsIdx;

        // Build an SkPath of the Redbook fan. We use "winding" fill type right now because we are
        // producing a coverage count, and must fill in every region that has non-zero wind. The
        // path processor will convert coverage count to the appropriate fill type later.
        SkPath fan;
        fan.setFillType(SkPath::kWinding_FillType);
        SkASSERT(GrCCGeometry::Verb::kBeginPath == verbs[fCurrPathVerbsIdx]);
        for (int i = fCurrPathVerbsIdx + 1; i < fGeometry.verbs().count(); ++i) {
            switch (verbs[i]) {
                case GrCCGeometry::Verb::kBeginPath:
                    SK_ABORT("Invalid GrCCGeometry");
                    continue;

                case GrCCGeometry::Verb::kBeginContour:
                    fan.moveTo(pts[ptsIdx++]);
                    continue;

                case GrCCGeometry::Verb::kLineTo:
                    fan.lineTo(pts[ptsIdx++]);
                    continue;

                case GrCCGeometry::Verb::kMonotonicQuadraticTo:
                    // The fan only needs the curve endpoints; the curve interiors are drawn by
                    // the quadratic/cubic render passes.
                    fan.lineTo(pts[ptsIdx + 1]);
                    ptsIdx += 2;
                    continue;

                case GrCCGeometry::Verb::kMonotonicCubicTo:
                    fan.lineTo(pts[ptsIdx + 2]);
                    ptsIdx += 3;
                    continue;

                case GrCCGeometry::Verb::kEndClosedContour:
                case GrCCGeometry::Verb::kEndOpenContour:
                    fan.close();
                    continue;
            }
        }
        GrTessellator::WindingVertex* vertices = nullptr;
        int count = GrTessellator::PathToVertices(fan, std::numeric_limits<float>::infinity(),
                                                  SkRect::Make(clippedDevIBounds), &vertices);
        SkASSERT(0 == count % 3);
        for (int i = 0; i < count; i += 3) {
            int tessWinding = vertices[i].fWinding;
            SkASSERT(tessWinding == vertices[i + 1].fWinding);
            SkASSERT(tessWinding == vertices[i + 2].fWinding);

            // Ensure this triangle's points actually wind in the same direction as tessWinding.
            // CCPR shaders use the sign of wind to determine which direction to bloat, so even for
            // "wound" triangles the winding sign and point ordering need to agree.
            float ax = vertices[i].fPos.fX - vertices[i + 1].fPos.fX;
            float ay = vertices[i].fPos.fY - vertices[i + 1].fPos.fY;
            float bx = vertices[i].fPos.fX - vertices[i + 2].fPos.fX;
            float by = vertices[i].fPos.fY - vertices[i + 2].fPos.fY;
            float wind = ax*by - ay*bx;
            if ((wind > 0) != (-tessWinding > 0)) { // Tessellator has opposite winding sense.
                std::swap(vertices[i + 1].fPos, vertices[i + 2].fPos);
            }

            // |winding| == 1 triangles are plain; others carry their wind as instance data.
            if (1 == abs(tessWinding)) {
                ++fCurrPathPrimitiveCounts.fTriangles;
            } else {
                ++fCurrPathPrimitiveCounts.fWoundTriangles;
            }
        }

        // PathInfo takes ownership of the tessellator's vertex allocation.
        fPathsInfo.back().fFanTessellation.reset(vertices);
        fPathsInfo.back().fFanTessellationCount = count;
    }

    fTotalPrimitiveCounts[(int)scissorMode] += fCurrPathPrimitiveCounts;

    if (ScissorMode::kScissored == scissorMode) {
        fScissorSubBatches.push_back() = {fTotalPrimitiveCounts[(int)ScissorMode::kScissored],
                                          clippedDevIBounds.makeOffset(atlasOffsetX, atlasOffsetY)};
    }

    SkDEBUGCODE(fParsingPath = false);
}
256
// Rolls fGeometry back to where the current path began, discarding everything parsed since the
// last parsePath() call.
void GrCCPathParser::discardParsedPath() {
    SkASSERT(fParsingPath);
    fGeometry.resize_back(fCurrPathPointsIdx, fCurrPathVerbsIdx);
    SkDEBUGCODE(fParsingPath = false);
}
262
263GrCCPathParser::CoverageCountBatchID GrCCPathParser::closeCurrentBatch() {
264 SkASSERT(!fInstanceBuffer);
265 SkASSERT(!fCoverageCountBatches.empty());
Chris Dalton84403d72018-02-13 21:46:17 -0500266 const auto& lastBatch = fCoverageCountBatches.back();
267 const auto& lastScissorSubBatch = fScissorSubBatches[lastBatch.fEndScissorSubBatchIdx - 1];
Chris Dalton9ca27842018-01-18 12:24:50 -0700268
Chris Dalton84403d72018-02-13 21:46:17 -0500269 PrimitiveTallies batchTotalCounts = fTotalPrimitiveCounts[(int)ScissorMode::kNonScissored] -
270 lastBatch.fEndNonScissorIndices;
271 batchTotalCounts += fTotalPrimitiveCounts[(int)ScissorMode::kScissored] -
272 lastScissorSubBatch.fEndPrimitiveIndices;
Chris Dalton9ca27842018-01-18 12:24:50 -0700273
274 fCoverageCountBatches.push_back() = {
275 fTotalPrimitiveCounts[(int)ScissorMode::kNonScissored],
Chris Dalton84403d72018-02-13 21:46:17 -0500276 fScissorSubBatches.count(),
277 batchTotalCounts
Chris Dalton9ca27842018-01-18 12:24:50 -0700278 };
Chris Dalton84403d72018-02-13 21:46:17 -0500279
280 int maxMeshes = 1 + fScissorSubBatches.count() - lastBatch.fEndScissorSubBatchIdx;
281 fMaxMeshesPerDraw = SkTMax(fMaxMeshesPerDraw, maxMeshes);
282
Chris Dalton9ca27842018-01-18 12:24:50 -0700283 return fCoverageCountBatches.count() - 1;
284}
285
// Emits a contour's triangle fan.
//
// Classic Redbook fanning would be the triangles: [0 1 2], [0 2 3], ..., [0 n-2 n-1].
//
// This function emits the triangle: [0 n/3 n*2/3], and then recurses on all three sides. The
// advantage to this approach is that for a convex-ish contour, it generates larger triangles.
// Classic fanning tends to generate long, skinny triangles, which are expensive to draw since they
// have a longer perimeter to rasterize and antialias.
//
// The indices array indexes the fan's points (think: glDrawElements), and must have at least log3
// elements past the end for this method to use as scratch space.
//
// Returns the next triangle instance after the final one emitted.
static TriPointInstance* emit_recursive_fan(const SkTArray<SkPoint, true>& pts,
                                            SkTArray<int32_t, true>& indices, int firstIndex,
                                            int indexCount, const Sk2f& atlasOffset,
                                            TriPointInstance out[]) {
    // Fewer than 3 indices cannot form a triangle; recursion bottoms out here.
    if (indexCount < 3) {
        return out;
    }

    // Emit the central triangle [first, first + n/3, first + 2n/3].
    int32_t oneThirdCount = indexCount / 3;
    int32_t twoThirdsCount = (2 * indexCount) / 3;
    out++->set(pts[indices[firstIndex]], pts[indices[firstIndex + oneThirdCount]],
               pts[indices[firstIndex + twoThirdsCount]], atlasOffset);

    // Recurse on the first two sides. The "+ 1" overlaps each sub-fan with the central triangle's
    // shared vertex.
    out = emit_recursive_fan(pts, indices, firstIndex, oneThirdCount + 1, atlasOffset, out);
    out = emit_recursive_fan(pts, indices, firstIndex + oneThirdCount,
                             twoThirdsCount - oneThirdCount + 1, atlasOffset, out);

    // For the third side the sub-fan must wrap around to the first index. Temporarily write it
    // into the scratch slot past the end, then restore the old value afterwards.
    int endIndex = firstIndex + indexCount;
    int32_t oldValue = indices[endIndex];
    indices[endIndex] = indices[firstIndex];
    out = emit_recursive_fan(pts, indices, firstIndex + twoThirdsCount,
                             indexCount - twoThirdsCount + 1, atlasOffset, out);
    indices[endIndex] = oldValue;

    return out;
}
325
// Copies a pre-tessellated fan (from GrTessellator) into the mapped instance data. Triangles with
// |winding| == 1 go into the TriPointInstance array; higher-wind triangles are emitted as
// QuadPointInstances that carry their (sign-flipped) winding as instance data.
static void emit_tessellated_fan(const GrTessellator::WindingVertex* vertices, int numVertices,
                                 const Sk2f& atlasOffset, TriPointInstance* triPointInstanceData,
                                 QuadPointInstance* quadPointInstanceData,
                                 GrCCGeometry::PrimitiveTallies* indices) {
    for (int i = 0; i < numVertices; i += 3) {
        if (1 == abs(vertices[i].fWinding)) {
            triPointInstanceData[indices->fTriangles++].set(vertices[i].fPos, vertices[i + 1].fPos,
                                                            vertices[i + 2].fPos, atlasOffset);
        } else {
            quadPointInstanceData[indices->fWoundTriangles++].set(
                vertices[i].fPos, vertices[i+1].fPos, vertices[i + 2].fPos, atlasOffset,
                // Tessellator has opposite winding sense.
                -static_cast<float>(vertices[i].fWinding));
        }
    }
}
342
// Allocates the shared GPU instance buffer, lays out the six per-primitive instance arrays inside
// it, and expands all parsed geometry into it. Returns false if the buffer allocation fails.
// Must be called after the final closeCurrentBatch().
bool GrCCPathParser::finalize(GrOnFlushResourceProvider* onFlushRP) {
    SkASSERT(!fParsingPath); // Call saveParsedPath() or discardParsedPath().
    SkASSERT(fCoverageCountBatches.back().fEndNonScissorIndices == // Call closeCurrentBatch().
             fTotalPrimitiveCounts[(int)ScissorMode::kNonScissored]);
    SkASSERT(fCoverageCountBatches.back().fEndScissorSubBatchIdx == fScissorSubBatches.count());

    // Here we build a single instance buffer to share with every internal batch.
    //
    // CCPR processes 3 different types of primitives: triangles, quadratics, cubics. Each primitive
    // type is further divided into instances that require a scissor and those that don't. This
    // leaves us with 3*2 = 6 independent instance arrays to build for the GPU.
    //
    // Rather than place each instance array in its own GPU buffer, we allocate a single
    // megabuffer and lay them all out side-by-side. We can offset the "baseInstance" parameter in
    // our draw calls to direct the GPU to the applicable elements within a given array.
    //
    // We already know how big to make each of the 6 arrays from fTotalPrimitiveCounts, so layout is
    // straightforward. Start with triangles and quadratics. They both view the instance buffer as
    // an array of TriPointInstance[], so we can begin at zero and lay them out one after the other.
    fBaseInstances[0].fTriangles = 0;
    fBaseInstances[1].fTriangles = fBaseInstances[0].fTriangles +
                                   fTotalPrimitiveCounts[0].fTriangles;
    fBaseInstances[0].fQuadratics = fBaseInstances[1].fTriangles +
                                    fTotalPrimitiveCounts[1].fTriangles;
    fBaseInstances[1].fQuadratics = fBaseInstances[0].fQuadratics +
                                    fTotalPrimitiveCounts[0].fQuadratics;
    int triEndIdx = fBaseInstances[1].fQuadratics + fTotalPrimitiveCounts[1].fQuadratics;

    // Wound triangles and cubics both view the same instance buffer as an array of
    // QuadPointInstance[]. So, reinterpreting the instance data as QuadPointInstance[], we start
    // them on the first index that will not overwrite previous TriPointInstance data.
    int quadBaseIdx =
            GR_CT_DIV_ROUND_UP(triEndIdx * sizeof(TriPointInstance), sizeof(QuadPointInstance));
    fBaseInstances[0].fWoundTriangles = quadBaseIdx;
    fBaseInstances[1].fWoundTriangles = fBaseInstances[0].fWoundTriangles +
                                        fTotalPrimitiveCounts[0].fWoundTriangles;
    fBaseInstances[0].fCubics = fBaseInstances[1].fWoundTriangles +
                                fTotalPrimitiveCounts[1].fWoundTriangles;
    fBaseInstances[1].fCubics = fBaseInstances[0].fCubics + fTotalPrimitiveCounts[0].fCubics;
    int quadEndIdx = fBaseInstances[1].fCubics + fTotalPrimitiveCounts[1].fCubics;

    fInstanceBuffer = onFlushRP->makeBuffer(kVertex_GrBufferType,
                                            quadEndIdx * sizeof(QuadPointInstance));
    if (!fInstanceBuffer) {
        return false;
    }

    // Two typed views of the same mapped memory; the layout above guarantees they don't clobber
    // each other.
    TriPointInstance* triPointInstanceData = static_cast<TriPointInstance*>(fInstanceBuffer->map());
    QuadPointInstance* quadPointInstanceData =
            reinterpret_cast<QuadPointInstance*>(triPointInstanceData);
    SkASSERT(quadPointInstanceData);

    PathInfo* nextPathInfo = fPathsInfo.begin();
    float atlasOffsetX = 0.0, atlasOffsetY = 0.0;
    Sk2f atlasOffset;
    PrimitiveTallies instanceIndices[2] = {fBaseInstances[0], fBaseInstances[1]};
    PrimitiveTallies* currIndices = nullptr;
    SkSTArray<256, int32_t, true> currFan;
    bool currFanIsTessellated = false;

    const SkTArray<SkPoint, true>& pts = fGeometry.points();
    int ptsIdx = -1;

    // Expand the ccpr verbs into GPU instance buffers.
    for (GrCCGeometry::Verb verb : fGeometry.verbs()) {
        switch (verb) {
            case GrCCGeometry::Verb::kBeginPath:
                SkASSERT(currFan.empty());
                currIndices = &instanceIndices[(int)nextPathInfo->fScissorMode];
                atlasOffsetX = static_cast<float>(nextPathInfo->fAtlasOffsetX);
                atlasOffsetY = static_cast<float>(nextPathInfo->fAtlasOffsetY);
                atlasOffset = {atlasOffsetX, atlasOffsetY};
                // If the fan was pre-tessellated in saveParsedPath(), emit it now and skip the
                // per-contour fan accumulation below.
                currFanIsTessellated = nextPathInfo->fFanTessellation.get();
                if (currFanIsTessellated) {
                    emit_tessellated_fan(nextPathInfo->fFanTessellation.get(),
                                         nextPathInfo->fFanTessellationCount, atlasOffset,
                                         triPointInstanceData, quadPointInstanceData, currIndices);
                }
                ++nextPathInfo;
                continue;

            case GrCCGeometry::Verb::kBeginContour:
                SkASSERT(currFan.empty());
                ++ptsIdx;
                if (!currFanIsTessellated) {
                    currFan.push_back(ptsIdx);
                }
                continue;

            case GrCCGeometry::Verb::kLineTo:
                ++ptsIdx;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case GrCCGeometry::Verb::kMonotonicQuadraticTo:
                triPointInstanceData[currIndices->fQuadratics++].set(&pts[ptsIdx], atlasOffset);
                ptsIdx += 2;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case GrCCGeometry::Verb::kMonotonicCubicTo:
                quadPointInstanceData[currIndices->fCubics++].set(&pts[ptsIdx], atlasOffsetX,
                                                                  atlasOffsetY);
                ptsIdx += 3;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case GrCCGeometry::Verb::kEndClosedContour:  // endPt == startPt.
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    // Drop the duplicated start point before fanning.
                    currFan.pop_back();
                }
                // fallthru.
            case GrCCGeometry::Verb::kEndOpenContour:  // endPt != startPt.
                SkASSERT(!currFanIsTessellated || currFan.empty());
                if (!currFanIsTessellated && currFan.count() >= 3) {
                    int fanSize = currFan.count();
                    // Reserve space for emit_recursive_fan. Technically this can grow to
                    // fanSize + log3(fanSize), but we approximate with log2.
                    currFan.push_back_n(SkNextLog2(fanSize));
                    SkDEBUGCODE(TriPointInstance* end =)
                    emit_recursive_fan(pts, currFan, 0, fanSize, atlasOffset,
                                       triPointInstanceData + currIndices->fTriangles);
                    currIndices->fTriangles += fanSize - 2;
                    SkASSERT(triPointInstanceData + currIndices->fTriangles == end);
                }
                currFan.reset();
                continue;
        }
    }

    fInstanceBuffer->unmap();

    // Verify every array was filled exactly to its planned boundary.
    SkASSERT(nextPathInfo == fPathsInfo.end());
    SkASSERT(ptsIdx == pts.count() - 1);
    SkASSERT(instanceIndices[0].fTriangles == fBaseInstances[1].fTriangles);
    SkASSERT(instanceIndices[1].fTriangles == fBaseInstances[0].fQuadratics);
    SkASSERT(instanceIndices[0].fQuadratics == fBaseInstances[1].fQuadratics);
    SkASSERT(instanceIndices[1].fQuadratics == triEndIdx);
    SkASSERT(instanceIndices[0].fWoundTriangles == fBaseInstances[1].fWoundTriangles);
    SkASSERT(instanceIndices[1].fWoundTriangles == fBaseInstances[0].fCubics);
    SkASSERT(instanceIndices[0].fCubics == fBaseInstances[1].fCubics);
    SkASSERT(instanceIndices[1].fCubics == quadEndIdx);

    fMeshesScratchBuffer.reserve(fMaxMeshesPerDraw);
    fDynamicStatesScratchBuffer.reserve(fMaxMeshesPerDraw);

    return true;
}
501
// Issues the coverage-count render passes for one batch: hulls/edges/corners for triangles and
// wound triangles, then hulls/corners for quadratics and cubics. Passes with zero instances for
// this batch are skipped entirely.
void GrCCPathParser::drawCoverageCount(GrOpFlushState* flushState, CoverageCountBatchID batchID,
                                       const SkIRect& drawBounds) const {
    using RenderPass = GrCCCoverageProcessor::RenderPass;
    using WindMethod = GrCCCoverageProcessor::WindMethod;

    SkASSERT(fInstanceBuffer);

    const PrimitiveTallies& batchTotalCounts = fCoverageCountBatches[batchID].fTotalPrimitiveCounts;

    // Coverage counts accumulate additively.
    GrPipeline pipeline(flushState->drawOpArgs().fProxy, GrPipeline::ScissorState::kEnabled,
                        SkBlendMode::kPlus);

    if (batchTotalCounts.fTriangles) {
        this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kTriangleHulls,
                             WindMethod::kCrossProduct, &PrimitiveTallies::fTriangles, drawBounds);
        this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kTriangleEdges,
                             WindMethod::kCrossProduct, &PrimitiveTallies::fTriangles,
                             drawBounds); // Might get skipped.
        this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kTriangleCorners,
                             WindMethod::kCrossProduct, &PrimitiveTallies::fTriangles, drawBounds);
    }

    if (batchTotalCounts.fWoundTriangles) {
        // Wound triangles carry their winding in the instance data rather than deriving it from
        // point order.
        this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kTriangleHulls,
                             WindMethod::kInstanceData, &PrimitiveTallies::fWoundTriangles,
                             drawBounds);
        this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kTriangleEdges,
                             WindMethod::kInstanceData, &PrimitiveTallies::fWoundTriangles,
                             drawBounds); // Might get skipped.
        this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kTriangleCorners,
                             WindMethod::kInstanceData, &PrimitiveTallies::fWoundTriangles,
                             drawBounds);
    }

    if (batchTotalCounts.fQuadratics) {
        this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kQuadraticHulls,
                             WindMethod::kCrossProduct, &PrimitiveTallies::fQuadratics, drawBounds);
        this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kQuadraticCorners,
                             WindMethod::kCrossProduct, &PrimitiveTallies::fQuadratics, drawBounds);
    }

    if (batchTotalCounts.fCubics) {
        this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kCubicHulls,
                             WindMethod::kCrossProduct, &PrimitiveTallies::fCubics, drawBounds);
        this->drawRenderPass(flushState, pipeline, batchID, RenderPass::kCubicCorners,
                             WindMethod::kCrossProduct, &PrimitiveTallies::fCubics, drawBounds);
    }
}
550
// Executes one render pass for one primitive type (selected via the 'instanceType' pointer-to-
// member) of one batch. Builds a mesh for the batch's non-scissored range plus one mesh per
// scissor sub-batch, then issues a single draw with per-mesh dynamic scissor state.
void GrCCPathParser::drawRenderPass(GrOpFlushState* flushState, const GrPipeline& pipeline,
                                    CoverageCountBatchID batchID,
                                    GrCCCoverageProcessor::RenderPass renderPass,
                                    GrCCCoverageProcessor::WindMethod windMethod,
                                    int PrimitiveTallies::*instanceType,
                                    const SkIRect& drawBounds) const {
    SkASSERT(pipeline.getScissorState().enabled());

    // Some passes are not needed on all hardware.
    if (!GrCCCoverageProcessor::DoesRenderPass(renderPass, flushState->caps())) {
        return;
    }

    // Don't call reset(), as that also resets the reserve count.
    fMeshesScratchBuffer.pop_back_n(fMeshesScratchBuffer.count());
    fDynamicStatesScratchBuffer.pop_back_n(fDynamicStatesScratchBuffer.count());

    GrCCCoverageProcessor proc(flushState->resourceProvider(), renderPass, windMethod);

    // batchID 0 is the dummy initial batch; real batches diff against their predecessor to
    // recover this batch's instance range.
    SkASSERT(batchID > 0);
    SkASSERT(batchID < fCoverageCountBatches.count());
    const CoverageCountBatch& previousBatch = fCoverageCountBatches[batchID - 1];
    const CoverageCountBatch& batch = fCoverageCountBatches[batchID];
    SkDEBUGCODE(int totalInstanceCount = 0);

    // Non-scissored instances: one mesh covering the whole draw bounds.
    if (int instanceCount = batch.fEndNonScissorIndices.*instanceType -
                            previousBatch.fEndNonScissorIndices.*instanceType) {
        SkASSERT(instanceCount > 0);
        int baseInstance = fBaseInstances[(int)ScissorMode::kNonScissored].*instanceType +
                           previousBatch.fEndNonScissorIndices.*instanceType;
        proc.appendMesh(fInstanceBuffer.get(), instanceCount, baseInstance, &fMeshesScratchBuffer);
        fDynamicStatesScratchBuffer.push_back().fScissorRect.setXYWH(0, 0, drawBounds.width(),
                                                                     drawBounds.height());
        SkDEBUGCODE(totalInstanceCount += instanceCount);
    }

    // Scissored instances: one mesh per sub-batch, each with its own scissor rect.
    SkASSERT(previousBatch.fEndScissorSubBatchIdx > 0);
    SkASSERT(batch.fEndScissorSubBatchIdx <= fScissorSubBatches.count());
    int baseScissorInstance = fBaseInstances[(int)ScissorMode::kScissored].*instanceType;
    for (int i = previousBatch.fEndScissorSubBatchIdx; i < batch.fEndScissorSubBatchIdx; ++i) {
        const ScissorSubBatch& previousSubBatch = fScissorSubBatches[i - 1];
        const ScissorSubBatch& scissorSubBatch = fScissorSubBatches[i];
        int startIndex = previousSubBatch.fEndPrimitiveIndices.*instanceType;
        int instanceCount = scissorSubBatch.fEndPrimitiveIndices.*instanceType - startIndex;
        if (!instanceCount) {
            continue;
        }
        SkASSERT(instanceCount > 0);
        proc.appendMesh(fInstanceBuffer.get(), instanceCount,
                        baseScissorInstance + startIndex, &fMeshesScratchBuffer);
        fDynamicStatesScratchBuffer.push_back().fScissorRect = scissorSubBatch.fScissor;
        SkDEBUGCODE(totalInstanceCount += instanceCount);
    }

    SkASSERT(fMeshesScratchBuffer.count() == fDynamicStatesScratchBuffer.count());
    SkASSERT(fMeshesScratchBuffer.count() <= fMaxMeshesPerDraw);
    SkASSERT(totalInstanceCount == batch.fTotalPrimitiveCounts.*instanceType);

    if (!fMeshesScratchBuffer.empty()) {
        SkASSERT(flushState->rtCommandBuffer());
        flushState->rtCommandBuffer()->draw(pipeline, proc, fMeshesScratchBuffer.begin(),
                                            fDynamicStatesScratchBuffer.begin(),
                                            fMeshesScratchBuffer.count(), SkRect::Make(drawBounds));
    }
}