Chris Dalton | c1e5963 | 2017-09-05 00:30:07 -0600 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2017 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
| 8 | #include "GrCCPRCoverageOp.h" |
| 9 | |
| 10 | #include "GrGpuCommandBuffer.h" |
| 11 | #include "GrOnFlushResourceProvider.h" |
| 12 | #include "GrOpFlushState.h" |
| 13 | #include "SkMathPriv.h" |
| 14 | #include "SkPath.h" |
| 15 | #include "SkPathPriv.h" |
| 16 | #include "SkPoint.h" |
| 17 | #include "SkNx.h" |
| 18 | #include "ccpr/GrCCPRGeometry.h" |
| 19 | |
| 20 | using TriangleInstance = GrCCPRCoverageProcessor::TriangleInstance; |
| 21 | using CurveInstance = GrCCPRCoverageProcessor::CurveInstance; |
| 22 | |
/**
 * This is a view matrix that accumulates two bounding boxes as it maps points: device-space bounds
 * and "45 degree" device-space bounds (| 1 -1 | * devCoords).
 *                                      | 1  1 |
 */
class AccumulatingViewMatrix {
public:
    // Seeds both accumulated bounding boxes with the transformed initialPoint.
    AccumulatingViewMatrix(const SkMatrix& m, const SkPoint& initialPoint);

    // Maps pt into device space, growing both accumulated bounding boxes, and returns the
    // device-space point.
    SkPoint transform(const SkPoint& pt);
    // Writes out the accumulated axis-aligned device bounds and the 45-degree-space bounds.
    void getAccumulatedBounds(SkRect* devBounds, SkRect* devBounds45) const;

private:
    // Both transforms packed into SIMD lanes: lanes 0,1 hold the device-space matrix rows and
    // lanes 2,3 hold the 45-degree matrix rows (see the constructor).
    Sk4f fX;
    Sk4f fY;
    Sk4f fT;

    // Running min/max of every transformed point, tracked in both spaces simultaneously.
    Sk4f fTopLeft;
    Sk4f fBottomRight;
};
| 43 | |
| 44 | inline AccumulatingViewMatrix::AccumulatingViewMatrix(const SkMatrix& m, |
| 45 | const SkPoint& initialPoint) { |
| 46 | // m45 transforms into 45 degree space in order to find the octagon's diagonals. We could |
| 47 | // use SK_ScalarRoot2Over2 if we wanted an orthonormal transform, but this is irrelevant as |
| 48 | // long as the shader uses the correct inverse when coming back to device space. |
| 49 | SkMatrix m45; |
| 50 | m45.setSinCos(1, 1); |
| 51 | m45.preConcat(m); |
| 52 | |
| 53 | fX = Sk4f(m.getScaleX(), m.getSkewY(), m45.getScaleX(), m45.getSkewY()); |
| 54 | fY = Sk4f(m.getSkewX(), m.getScaleY(), m45.getSkewX(), m45.getScaleY()); |
| 55 | fT = Sk4f(m.getTranslateX(), m.getTranslateY(), m45.getTranslateX(), m45.getTranslateY()); |
| 56 | |
| 57 | Sk4f transformed = SkNx_fma(fY, Sk4f(initialPoint.y()), fT); |
| 58 | transformed = SkNx_fma(fX, Sk4f(initialPoint.x()), transformed); |
| 59 | fTopLeft = fBottomRight = transformed; |
| 60 | } |
| 61 | |
| 62 | inline SkPoint AccumulatingViewMatrix::transform(const SkPoint& pt) { |
| 63 | Sk4f transformed = SkNx_fma(fY, Sk4f(pt.y()), fT); |
| 64 | transformed = SkNx_fma(fX, Sk4f(pt.x()), transformed); |
| 65 | |
| 66 | fTopLeft = Sk4f::Min(fTopLeft, transformed); |
| 67 | fBottomRight = Sk4f::Max(fBottomRight, transformed); |
| 68 | |
| 69 | // TODO: vst1_lane_f32? (Sk4f::storeLane?) |
| 70 | float data[4]; |
| 71 | transformed.store(data); |
| 72 | return SkPoint::Make(data[0], data[1]); |
| 73 | } |
| 74 | |
| 75 | inline void AccumulatingViewMatrix::getAccumulatedBounds(SkRect* devBounds, |
| 76 | SkRect* devBounds45) const { |
| 77 | float topLeft[4], bottomRight[4]; |
| 78 | fTopLeft.store(topLeft); |
| 79 | fBottomRight.store(bottomRight); |
| 80 | devBounds->setLTRB(topLeft[0], topLeft[1], bottomRight[0], bottomRight[1]); |
| 81 | devBounds45->setLTRB(topLeft[2], topLeft[3], bottomRight[2], bottomRight[3]); |
| 82 | } |
| 83 | |
// Parses one path's geometry into device space (appending to fGeometry) and computes its
// device-space bounding boxes: axis-aligned (devBounds) and 45-degree rotated (devBounds45).
// The caller must follow up with saveParsedPath() or discardParsedPath().
void GrCCPRCoverageOpsBuilder::parsePath(const SkMatrix& viewMatrix, const SkPath& path,
                                         SkRect* devBounds, SkRect* devBounds45) {
    SkASSERT(!fParsingPath);
    SkDEBUGCODE(fParsingPath = true);

    // Remember where this path's data starts so discardParsedPath() can roll it back.
    fCurrPathPointsIdx = fGeometry.points().count();
    fCurrPathVerbsIdx = fGeometry.verbs().count();
    fCurrPathTallies = PrimitiveTallies();

    fGeometry.beginPath();

    if (path.isEmpty()) {
        devBounds->setEmpty();
        devBounds45->setEmpty();
        return;
    }

    const SkPoint* const pts = SkPathPriv::PointData(path);
    int ptsIdx = 0;
    bool insideContour = false;

    // The accumulating matrix grows both bounding boxes as a side effect of every transform()
    // call below.
    AccumulatingViewMatrix m(viewMatrix, pts[0]);

    for (SkPath::Verb verb : SkPathPriv::Verbs(path)) {
        switch (verb) {
            case SkPath::kMove_Verb:
                this->endContourIfNeeded(insideContour);
                fGeometry.beginContour(m.transform(pts[ptsIdx++]));
                insideContour = true;
                continue;
            case SkPath::kClose_Verb:
                this->endContourIfNeeded(insideContour);
                insideContour = false;
                continue;
            case SkPath::kLine_Verb:
                fGeometry.lineTo(m.transform(pts[ptsIdx++]));
                continue;
            case SkPath::kQuad_Verb:
                SkASSERT(ptsIdx >= 1); // SkPath should have inserted an implicit moveTo if needed.
                fGeometry.quadraticTo(m.transform(pts[ptsIdx]), m.transform(pts[ptsIdx + 1]));
                ptsIdx += 2;
                continue;
            case SkPath::kCubic_Verb:
                SkASSERT(ptsIdx >= 1); // SkPath should have inserted an implicit moveTo if needed.
                fGeometry.cubicTo(m.transform(pts[ptsIdx]), m.transform(pts[ptsIdx + 1]),
                                  m.transform(pts[ptsIdx + 2]));
                ptsIdx += 3;
                continue;
            case SkPath::kConic_Verb:
                SK_ABORT("Conics are not supported.");
            default:
                SK_ABORT("Unexpected path verb.");
        }
    }

    this->endContourIfNeeded(insideContour);
    m.getAccumulatedBounds(devBounds, devBounds45);
}
| 142 | |
| 143 | void GrCCPRCoverageOpsBuilder::endContourIfNeeded(bool insideContour) { |
| 144 | if (insideContour) { |
| 145 | fCurrPathTallies += fGeometry.endContour(); |
| 146 | } |
| 147 | } |
| 148 | |
// Commits the most recently parsed path (parsePath()), recording its scissor mode and atlas
// location, and hands off any pending terminating Op.
void GrCCPRCoverageOpsBuilder::saveParsedPath(ScissorMode scissorMode,
                                              const SkIRect& clippedDevIBounds,
                                              int16_t atlasOffsetX, int16_t atlasOffsetY) {
    SkASSERT(fParsingPath);

    fPathsInfo.push_back() = {
        scissorMode,
        // Pack both 16-bit atlas offsets into one int32: Y in the high 16 bits, X in the low
        // 16. The casts keep the left shift of a (possibly negative) Y value well-defined.
        (int32_t) (((uint32_t) (int32_t) atlasOffsetY << 16) | (atlasOffsetX & 0xffff)),
        std::move(fTerminatingOp)
    };

    fTallies[(int)scissorMode] += fCurrPathTallies;

    if (ScissorMode::kScissored == scissorMode) {
        // Scissored paths also record their tallies and atlas-space bounds so the Op can set a
        // scissor rect per batch at draw time.
        fScissorBatches.push_back() = {
            fCurrPathTallies,
            clippedDevIBounds.makeOffset(atlasOffsetX, atlasOffsetY)
        };
    }

    SkDEBUGCODE(fParsingPath = false);
}
| 171 | |
// Throws away the most recently parsed path (parsePath()), rolling fGeometry back to its state
// before the parse.
void GrCCPRCoverageOpsBuilder::discardParsedPath() {
    SkASSERT(fParsingPath);

    // The code will still work whether or not the below assertion is true. It is just unlikely that
    // the caller would want this, and probably indicative of a mistake. (Why emit an
    // intermediate Op (to switch to a new atlas?), just to then throw the path away?)
    SkASSERT(!fTerminatingOp);

    fGeometry.resize_back(fCurrPathPointsIdx, fCurrPathVerbsIdx);
    SkDEBUGCODE(fParsingPath = false);
}
| 183 | |
// Closes out the current in-progress Op. The new Op takes ownership of the scissor batches
// accumulated so far; it will be attached to the next path saved (see saveParsedPath()) or
// flushed by finalize().
void GrCCPRCoverageOpsBuilder::emitOp(SkISize drawBounds) {
    SkASSERT(!fTerminatingOp);
    fTerminatingOp.reset(new GrCCPRCoverageOp(std::move(fScissorBatches), drawBounds));
    SkASSERT(fScissorBatches.empty()); // Should have been moved into the new Op above.
}
| 189 | |
// Emits a contour's triangle fan.
//
// Classic Redbook fanning would be the triangles: [0  1  2], [0  2  3], ..., [0  n-2  n-1].
//
// This function emits the triangle: [0  n/3  n*2/3], and then recurses on all three sides. The
// advantage to this approach is that for a convex-ish contour, it generates larger triangles.
// Classic fanning tends to generate long, skinny triangles, which are expensive to draw since they
// have a longer perimeter to rasterize and antialias.
//
// The indices array indexes the fan's points (think: glDrawElements), and must have at least log3
// elements past the end for this method to use as scratch space.
//
// Returns the next triangle instance after the final one emitted.
static TriangleInstance* emit_recursive_fan(SkTArray<int32_t, true>& indices, int firstIndex,
                                            int indexCount, int32_t packedAtlasOffset,
                                            TriangleInstance out[]) {
    if (indexCount < 3) {
        return out; // Not enough points left for a triangle.
    }

    // Emit the big middle triangle: [first, first + n/3, first + 2n/3].
    const int32_t oneThirdCount = indexCount / 3;
    const int32_t twoThirdsCount = (2 * indexCount) / 3;
    *out++ = {
        indices[firstIndex],
        indices[firstIndex + oneThirdCount],
        indices[firstIndex + twoThirdsCount],
        packedAtlasOffset
    };

    // Recurse on the first two sides. Each sub-fan shares an endpoint with the middle
    // triangle, hence the "+ 1" counts.
    out = emit_recursive_fan(indices, firstIndex, oneThirdCount + 1, packedAtlasOffset, out);
    out = emit_recursive_fan(indices, firstIndex + oneThirdCount,
                             twoThirdsCount - oneThirdCount + 1, packedAtlasOffset, out);

    // The third side wraps around to the fan's first point. Temporarily write that point into
    // the scratch slot just past the end (restored afterward) so the recursion sees a
    // contiguous index range.
    int endIndex = firstIndex + indexCount;
    int32_t oldValue = indices[endIndex];
    indices[endIndex] = indices[firstIndex];
    out = emit_recursive_fan(indices, firstIndex + twoThirdsCount, indexCount - twoThirdsCount + 1,
                             packedAtlasOffset, out);
    indices[endIndex] = oldValue;

    return out;
}
| 232 | |
// Uploads the parsed geometry to GPU buffers (a texel buffer of points plus an instance buffer
// of indices into it), assigns those buffers to every intermediate Op emitted along the way,
// and appends all finished Ops to 'ops'. Returns false if a GPU buffer could not be created.
bool GrCCPRCoverageOpsBuilder::finalize(GrOnFlushResourceProvider* onFlushRP,
                                        SkTArray<std::unique_ptr<GrCCPRCoverageOp>>* ops) {
    SkASSERT(!fParsingPath);

    const SkTArray<SkPoint, true>& points = fGeometry.points();
    sk_sp<GrBuffer> pointsBuffer = onFlushRP->makeBuffer(kTexel_GrBufferType,
                                                         points.count() * 2 * sizeof(float),
                                                         points.begin());
    if (!pointsBuffer) {
        return false;
    }

    // Configure the instance buffer layout. For each primitive type, non-scissored instances
    // (index 0) come before scissored ones (index 1); triangles precede all curves.
    PrimitiveTallies baseInstances[kNumScissorModes];
    // int4 indices.
    baseInstances[0].fTriangles = 0;
    baseInstances[1].fTriangles = baseInstances[0].fTriangles + fTallies[0].fTriangles;
    // int2 indices (curves index the buffer as int2 rather than int4).
    baseInstances[0].fQuadratics = (baseInstances[1].fTriangles + fTallies[1].fTriangles) * 2;
    baseInstances[1].fQuadratics = baseInstances[0].fQuadratics + fTallies[0].fQuadratics;
    baseInstances[0].fSerpentines = baseInstances[1].fQuadratics + fTallies[1].fQuadratics;
    baseInstances[1].fSerpentines = baseInstances[0].fSerpentines + fTallies[0].fSerpentines;
    baseInstances[0].fLoops = baseInstances[1].fSerpentines + fTallies[1].fSerpentines;
    baseInstances[1].fLoops = baseInstances[0].fLoops + fTallies[0].fLoops;
    int instanceBufferSize = (baseInstances[1].fLoops + fTallies[1].fLoops) * sizeof(CurveInstance);

    sk_sp<GrBuffer> instanceBuffer = onFlushRP->makeBuffer(kVertex_GrBufferType,
                                                           instanceBufferSize);
    if (!instanceBuffer) {
        return false;
    }

    // The same mapped memory is written through two views: triangles as TriangleInstance and
    // curves as CurveInstance (hence the "* 2" in the layout above).
    TriangleInstance* triangleInstanceData = static_cast<TriangleInstance*>(instanceBuffer->map());
    CurveInstance* curveInstanceData = reinterpret_cast<CurveInstance*>(triangleInstanceData);
    SkASSERT(curveInstanceData);

    PathInfo* currPathInfo = fPathsInfo.begin();
    int32_t packedAtlasOffset;
    int ptsIdx = -1; // Index of the last point consumed from the points buffer.
    PrimitiveTallies instanceIndices[2] = {baseInstances[0], baseInstances[1]};
    PrimitiveTallies* currIndices;
    SkSTArray<256, int32_t, true> currFan;

#ifdef SK_DEBUG
    int numScissoredPaths = 0;
    int numScissorBatches = 0;
    PrimitiveTallies initialBaseInstances[] = {baseInstances[0], baseInstances[1]};
#endif

    // Expand the ccpr verbs into GPU instance buffers.
    for (GrCCPRGeometry::Verb verb : fGeometry.verbs()) {
        switch (verb) {
            case GrCCPRGeometry::Verb::kBeginPath:
                SkASSERT(currFan.empty());
                currIndices = &instanceIndices[(int)currPathInfo->fScissorMode];
                packedAtlasOffset = currPathInfo->fPackedAtlasOffset;
#ifdef SK_DEBUG
                if (ScissorMode::kScissored == currPathInfo->fScissorMode) {
                    ++numScissoredPaths;
                }
#endif
                // If this path carries a terminating Op, flush it now: it owns every instance
                // written since the previous Op (i.e. the range [baseInstances,
                // instanceIndices)).
                if (auto op = std::move(currPathInfo->fTerminatingOp)) {
                    op->setBuffers(pointsBuffer, instanceBuffer, baseInstances, instanceIndices);
                    baseInstances[0] = instanceIndices[0];
                    baseInstances[1] = instanceIndices[1];
                    SkDEBUGCODE(numScissorBatches += op->fScissorBatches.count());
                    ops->push_back(std::move(op));
                }
                ++currPathInfo;
                continue;

            case GrCCPRGeometry::Verb::kBeginContour:
                SkASSERT(currFan.empty());
                currFan.push_back(++ptsIdx);
                continue;

            case GrCCPRGeometry::Verb::kLineTo:
                SkASSERT(!currFan.empty());
                currFan.push_back(++ptsIdx);
                continue;

            case GrCCPRGeometry::Verb::kMonotonicQuadraticTo:
                SkASSERT(!currFan.empty());
                curveInstanceData[currIndices->fQuadratics++] = {ptsIdx, packedAtlasOffset};
                currFan.push_back(ptsIdx += 2);
                continue;

            case GrCCPRGeometry::Verb::kMonotonicSerpentineTo:
                SkASSERT(!currFan.empty());
                curveInstanceData[currIndices->fSerpentines++] = {ptsIdx, packedAtlasOffset};
                currFan.push_back(ptsIdx += 3);
                continue;

            case GrCCPRGeometry::Verb::kMonotonicLoopTo:
                SkASSERT(!currFan.empty());
                curveInstanceData[currIndices->fLoops++] = {ptsIdx, packedAtlasOffset};
                currFan.push_back(ptsIdx += 3);
                continue;

            case GrCCPRGeometry::Verb::kEndClosedContour: // endPt == startPt.
                SkASSERT(!currFan.empty());
                currFan.pop_back();
                // fallthru.
            case GrCCPRGeometry::Verb::kEndOpenContour: // endPt != startPt.
                if (currFan.count() >= 3) {
                    int fanSize = currFan.count();
                    // Reserve space for emit_recursive_fan. Technically this can grow to
                    // fanSize + log3(fanSize), but we approximate with log2.
                    currFan.push_back_n(SkNextLog2(fanSize));
                    SkDEBUGCODE(TriangleInstance* end =)
                    emit_recursive_fan(currFan, 0, fanSize, packedAtlasOffset,
                                       triangleInstanceData + currIndices->fTriangles);
                    currIndices->fTriangles += fanSize - 2;
                    SkASSERT(triangleInstanceData + currIndices->fTriangles == end);
                }
                currFan.reset();
                continue;
        }
    }

    instanceBuffer->unmap();

    // Flush the final terminating Op; it owns everything written since the last flush above.
    if (auto op = std::move(fTerminatingOp)) {
        op->setBuffers(std::move(pointsBuffer), std::move(instanceBuffer), baseInstances,
                       instanceIndices);
        SkDEBUGCODE(numScissorBatches += op->fScissorBatches.count());
        ops->push_back(std::move(op));
    }

    // Verify every path, point, and instance slot was consumed exactly as laid out above.
    SkASSERT(currPathInfo == fPathsInfo.end());
    SkASSERT(ptsIdx == points.count() - 1);
    SkASSERT(numScissoredPaths == numScissorBatches);
    SkASSERT(instanceIndices[0].fTriangles == initialBaseInstances[1].fTriangles);
    SkASSERT(instanceIndices[1].fTriangles * 2 == initialBaseInstances[0].fQuadratics);
    SkASSERT(instanceIndices[0].fQuadratics == initialBaseInstances[1].fQuadratics);
    SkASSERT(instanceIndices[1].fQuadratics == initialBaseInstances[0].fSerpentines);
    SkASSERT(instanceIndices[0].fSerpentines == initialBaseInstances[1].fSerpentines);
    SkASSERT(instanceIndices[1].fSerpentines == initialBaseInstances[0].fLoops);
    SkASSERT(instanceIndices[0].fLoops == initialBaseInstances[1].fLoops);
    SkASSERT(instanceIndices[1].fLoops * (int) sizeof(CurveInstance) == instanceBufferSize);
    return true;
}
| 375 | |
| 376 | void GrCCPRCoverageOp::setBuffers(sk_sp<GrBuffer> pointsBuffer, sk_sp<GrBuffer> instanceBuffer, |
| 377 | const PrimitiveTallies baseInstances[kNumScissorModes], |
| 378 | const PrimitiveTallies endInstances[kNumScissorModes]) { |
| 379 | fPointsBuffer = std::move(pointsBuffer); |
| 380 | fInstanceBuffer = std::move(instanceBuffer); |
| 381 | fBaseInstances[0] = baseInstances[0]; |
| 382 | fBaseInstances[1] = baseInstances[1]; |
| 383 | fInstanceCounts[0] = endInstances[0] - baseInstances[0]; |
| 384 | fInstanceCounts[1] = endInstances[1] - baseInstances[1]; |
| 385 | } |
| 386 | |
// Records the draws that rasterize all coverage-count masks into the bound render target.
// Coverage accumulates additively (SkBlendMode::kPlus), so each primitive family is drawn in
// several passes (hulls, edges/corners) that sum to the final coverage count.
void GrCCPRCoverageOp::onExecute(GrOpFlushState* flushState) {
    using Mode = GrCCPRCoverageProcessor::Mode;

    SkDEBUGCODE(GrCCPRCoverageProcessor::Validate(flushState->drawOpArgs().fProxy));
    SkASSERT(fPointsBuffer);
    SkASSERT(fInstanceBuffer);

    // The scissor test is always enabled; non-scissored draws just use the full draw bounds
    // (see drawMaskPrimitives()).
    GrPipeline pipeline(flushState->drawOpArgs().fProxy, GrPipeline::ScissorState::kEnabled,
                        SkBlendMode::kPlus);

    // Worst case: one mesh for the non-scissored instances plus one per scissor batch.
    fMeshesScratchBuffer.reserve(1 + fScissorBatches.count());
    fDynamicStatesScratchBuffer.reserve(1 + fScissorBatches.count());

    // Triangles.
    auto constexpr kTrianglesGrPrimitiveType = GrCCPRCoverageProcessor::kTrianglesGrPrimitiveType;
    this->drawMaskPrimitives(flushState, pipeline, Mode::kTriangleHulls,
                             kTrianglesGrPrimitiveType, 3, &PrimitiveTallies::fTriangles);
    this->drawMaskPrimitives(flushState, pipeline, Mode::kTriangleEdges,
                             kTrianglesGrPrimitiveType, 3, &PrimitiveTallies::fTriangles);
    this->drawMaskPrimitives(flushState, pipeline, Mode::kTriangleCorners,
                             kTrianglesGrPrimitiveType, 3, &PrimitiveTallies::fTriangles);

    // Quadratics.
    auto constexpr kQuadraticsGrPrimitiveType = GrCCPRCoverageProcessor::kQuadraticsGrPrimitiveType;
    this->drawMaskPrimitives(flushState, pipeline, Mode::kQuadraticHulls,
                             kQuadraticsGrPrimitiveType, 3, &PrimitiveTallies::fQuadratics);
    this->drawMaskPrimitives(flushState, pipeline, Mode::kQuadraticCorners,
                             kQuadraticsGrPrimitiveType, 3, &PrimitiveTallies::fQuadratics);

    // Cubics. Serpentines and loops are tracked separately since they use different processor
    // modes.
    auto constexpr kCubicsGrPrimitiveType = GrCCPRCoverageProcessor::kCubicsGrPrimitiveType;
    this->drawMaskPrimitives(flushState, pipeline, Mode::kSerpentineHulls,
                             kCubicsGrPrimitiveType, 4, &PrimitiveTallies::fSerpentines);
    this->drawMaskPrimitives(flushState, pipeline, Mode::kLoopHulls,
                             kCubicsGrPrimitiveType, 4, &PrimitiveTallies::fLoops);
    this->drawMaskPrimitives(flushState, pipeline, Mode::kSerpentineCorners,
                             kCubicsGrPrimitiveType, 4, &PrimitiveTallies::fSerpentines);
    this->drawMaskPrimitives(flushState, pipeline, Mode::kLoopCorners,
                             kCubicsGrPrimitiveType, 4, &PrimitiveTallies::fLoops);
}
| 427 | |
// Issues the instanced draws for one coverage-processor mode: first a single mesh covering all
// non-scissored instances (scissored to the full draw bounds), then one mesh per scissor batch,
// each with its own scissor rect. 'instanceType' selects which PrimitiveTallies field (i.e.
// which primitive type's counts) to draw.
void GrCCPRCoverageOp::drawMaskPrimitives(GrOpFlushState* flushState, const GrPipeline& pipeline,
                                          GrCCPRCoverageProcessor::Mode mode,
                                          GrPrimitiveType primType, int vertexCount,
                                          int PrimitiveTallies::* instanceType) const {
    using ScissorMode = GrCCPRCoverageOpsBuilder::ScissorMode;
    SkASSERT(pipeline.getScissorState().enabled());

    // Meshes and their per-mesh scissor rects are built up in parallel arrays.
    fMeshesScratchBuffer.reset();
    fDynamicStatesScratchBuffer.reset();

    if (const int instanceCount = fInstanceCounts[(int)ScissorMode::kNonScissored].*instanceType) {
        SkASSERT(instanceCount > 0);
        const int baseInstance = fBaseInstances[(int)ScissorMode::kNonScissored].*instanceType;
        GrMesh& mesh = fMeshesScratchBuffer.emplace_back(primType);
        mesh.setInstanced(fInstanceBuffer.get(), instanceCount, baseInstance, vertexCount);
        // Non-scissored draws still need a scissor rect (it is always enabled): the full
        // draw bounds.
        fDynamicStatesScratchBuffer.push_back().fScissorRect.setXYWH(0, 0, fDrawBounds.width(),
                                                                     fDrawBounds.height());
    }

    if (fInstanceCounts[(int)ScissorMode::kScissored].*instanceType) {
        // Scissored instances for each batch are laid out contiguously after 'baseInstance',
        // so we just advance it batch by batch.
        int baseInstance = fBaseInstances[(int)ScissorMode::kScissored].*instanceType;
        for (const ScissorBatch& batch : fScissorBatches) {
            SkASSERT(this->bounds().contains(batch.fScissor));
            const int instanceCount = batch.fInstanceCounts.*instanceType;
            if (!instanceCount) {
                continue;
            }
            SkASSERT(instanceCount > 0);
            GrMesh& mesh = fMeshesScratchBuffer.emplace_back(primType);
            mesh.setInstanced(fInstanceBuffer.get(), instanceCount, baseInstance, vertexCount);
            fDynamicStatesScratchBuffer.push_back().fScissorRect = batch.fScissor;
            baseInstance += instanceCount;
        }
    }

    SkASSERT(fMeshesScratchBuffer.count() == fDynamicStatesScratchBuffer.count());

    if (!fMeshesScratchBuffer.empty()) {
        GrCCPRCoverageProcessor proc(mode, fPointsBuffer.get());
        SkASSERT(flushState->rtCommandBuffer());
        flushState->rtCommandBuffer()->draw(pipeline, proc, fMeshesScratchBuffer.begin(),
                                            fDynamicStatesScratchBuffer.begin(),
                                            fMeshesScratchBuffer.count(), this->bounds());
    }
}