blob: c067e3a34ed05ceb7079c71ddf52165fa3b21ef0 [file] [log] [blame]
/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#include "GrCoverageCountingPathRenderer.h"
9
10#include "GrCaps.h"
11#include "GrClip.h"
12#include "GrGpu.h"
13#include "GrGpuCommandBuffer.h"
14#include "SkMakeUnique.h"
15#include "SkMatrix.h"
16#include "GrOpFlushState.h"
17#include "GrRenderTargetOpList.h"
18#include "GrStyle.h"
19#include "ccpr/GrCCPRPathProcessor.h"
20
21using DrawPathsOp = GrCoverageCountingPathRenderer::DrawPathsOp;
22using ScissorMode = GrCCPRCoverageOpsBuilder::ScissorMode;
23
24bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
25 const GrShaderCaps& shaderCaps = *caps.shaderCaps();
26 return shaderCaps.geometryShaderSupport() &&
27 shaderCaps.texelBufferSupport() &&
28 shaderCaps.integerSupport() &&
29 shaderCaps.flatInterpolationSupport() &&
30 shaderCaps.maxVertexSamplers() >= 1 &&
31 caps.instanceAttribSupport() &&
32 caps.isConfigTexturable(kAlpha_half_GrPixelConfig) &&
33 caps.isConfigRenderable(kAlpha_half_GrPixelConfig, /*withMSAA=*/false);
34}
35
36sk_sp<GrCoverageCountingPathRenderer>
37GrCoverageCountingPathRenderer::CreateIfSupported(const GrCaps& caps) {
38 return sk_sp<GrCoverageCountingPathRenderer>(IsSupported(caps) ?
39 new GrCoverageCountingPathRenderer : nullptr);
40}
41
42bool GrCoverageCountingPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
43 if (!args.fShape->style().isSimpleFill() ||
44 args.fShape->inverseFilled() ||
45 args.fViewMatrix->hasPerspective() ||
46 GrAAType::kCoverage != args.fAAType) {
47 return false;
48 }
49
50 SkPath path;
51 args.fShape->asPath(&path);
52 return !SkPathPriv::ConicWeightCnt(path);
53}
54
55bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
56 SkASSERT(!fFlushing);
57 SkASSERT(!args.fShape->isEmpty());
58
59 auto op = skstd::make_unique<DrawPathsOp>(this, args, args.fPaint.getColor());
60 args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
61
62 return true;
63}
64
// Records a single path draw. The op starts out holding exactly one SingleDraw
// (fHeadDraw, stored inline) and may accumulate more draws via onCombineIfPossible.
GrCoverageCountingPathRenderer::DrawPathsOp::DrawPathsOp(GrCoverageCountingPathRenderer* ccpr,
                                                         const DrawPathArgs& args, GrColor color)
        : INHERITED(ClassID())
        , fCCPR(ccpr)
        , fSRGBFlags(GrPipeline::SRGBFlagsFromPaint(args.fPaint))
        , fProcessors(std::move(args.fPaint)) // Paint is consumed here; see fColor note below.
        , fTailDraw(&fHeadDraw)               // Draw list starts with just the inline head.
        , fOwningRTPendingOps(nullptr) {      // Assigned later, in wasRecorded().
    SkDEBUGCODE(fBaseInstance = -1);
    SkDEBUGCODE(fDebugInstanceCount = 1;)

    GrRenderTargetContext* const rtc = args.fRenderTargetContext;

    SkRect devBounds;
    args.fViewMatrix->mapRect(&devBounds, args.fShape->bounds());

    // Only scissor when the conservative clip bounds don't already contain the path's
    // device-space bounds.
    args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &fHeadDraw.fClipBounds, nullptr);
    fHeadDraw.fScissorMode = fHeadDraw.fClipBounds.contains(devBounds) ?
                             ScissorMode::kNonScissored : ScissorMode::kScissored;
    fHeadDraw.fMatrix = *args.fViewMatrix;
    args.fShape->asPath(&fHeadDraw.fPath);
    fHeadDraw.fColor = color; // Can't call args.fPaint.getColor() because it has been std::move'd.

    // FIXME: intersect with clip bounds to (hopefully) improve batching.
    // (This is nontrivial due to assumptions in generating the octagon cover geometry.)
    this->setBounds(devBounds, GrOp::HasAABloat::kYes, GrOp::IsZeroArea::kNo);
}
92
93GrDrawOp::RequiresDstTexture DrawPathsOp::finalize(const GrCaps& caps, const GrAppliedClip* clip) {
94 SingleDraw& onlyDraw = this->getOnlyPathDraw();
95 GrProcessorSet::Analysis analysis = fProcessors.finalize(onlyDraw.fColor,
96 GrProcessorAnalysisCoverage::kSingleChannel,
97 clip, false, caps, &onlyDraw.fColor);
98 return analysis.requiresDstTexture() ? RequiresDstTexture::kYes : RequiresDstTexture::kNo;
99}
100
bool DrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps& caps) {
    DrawPathsOp* that = op->cast<DrawPathsOp>();
    SkASSERT(fCCPR == that->fCCPR);
    SkASSERT(fOwningRTPendingOps);
    SkASSERT(fDebugInstanceCount);
    SkASSERT(that->fDebugInstanceCount);

    // Ops can only fuse when they would produce identical pipeline state.
    if (this->getFillType() != that->getFillType() ||
        fSRGBFlags != that->fSRGBFlags ||
        fProcessors != that->fProcessors) {
        return false;
    }

    if (RTPendingOps* owningRTPendingOps = that->fOwningRTPendingOps) {
        // 'that' was already recorded; unlink it from the pending-ops list since its
        // draws are being absorbed into this op.
        SkASSERT(owningRTPendingOps == fOwningRTPendingOps);
        owningRTPendingOps->fOpList.remove(that);
    } else {
        // wasRecorded is not called when the op gets combined first. Count path items here instead.
        SingleDraw& onlyDraw = that->getOnlyPathDraw();
        fOwningRTPendingOps->fMaxBufferItems.countPathItems(onlyDraw.fScissorMode, onlyDraw.fPath);
    }

    // Splice that op's draw list onto our tail. Its inline head draw must be copied into
    // the shared allocator because 'that' may be destroyed before flush; any remaining
    // nodes already live in the allocator and are linked in as-is.
    fTailDraw->fNext = &fOwningRTPendingOps->fDrawsAllocator.push_back(that->fHeadDraw);
    fTailDraw = that->fTailDraw == &that->fHeadDraw ? fTailDraw->fNext : that->fTailDraw;

    this->joinBounds(*that);

    SkDEBUGCODE(fDebugInstanceCount += that->fDebugInstanceCount;)
    SkDEBUGCODE(that->fDebugInstanceCount = 0);
    return true;
}
132
133void DrawPathsOp::wasRecorded(GrRenderTargetOpList* opList) {
134 SkASSERT(!fOwningRTPendingOps);
135 SingleDraw& onlyDraw = this->getOnlyPathDraw();
136 fOwningRTPendingOps = &fCCPR->fRTPendingOpsMap[opList->uniqueID()];
137 fOwningRTPendingOps->fOpList.addToTail(this);
138 fOwningRTPendingOps->fMaxBufferItems.countPathItems(onlyDraw.fScissorMode, onlyDraw.fPath);
139}
140
// Flush-time entry point: gathers every pending DrawPathsOp for the op lists being
// flushed, renders their coverage counts into one or more atlases, and fills the
// per-flush instance buffer that onExecute() later draws from.
void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
                                              const uint32_t* opListIDs, int numOpListIDs,
                                              SkTArray<sk_sp<GrRenderTargetContext>>* results) {
    using PathInstance = GrCCPRPathProcessor::Instance;

    // Per-flush resources must have been released by the previous postFlush().
    SkASSERT(!fPerFlushIndexBuffer);
    SkASSERT(!fPerFlushVertexBuffer);
    SkASSERT(!fPerFlushInstanceBuffer);
    SkASSERT(fPerFlushAtlases.empty());
    SkASSERT(!fFlushing);
    SkDEBUGCODE(fFlushing = true;)

    if (fRTPendingOpsMap.empty()) {
        return; // Nothing to draw.
    }

    // Stage 1: collect the ops for the flushing op lists and sum worst-case buffer sizes.
    SkTInternalLList<DrawPathsOp> flushingOps;
    GrCCPRCoverageOpsBuilder::MaxBufferItems maxBufferItems;

    for (int i = 0; i < numOpListIDs; ++i) {
        auto it = fRTPendingOpsMap.find(opListIDs[i]);
        if (fRTPendingOpsMap.end() != it) {
            RTPendingOps& rtPendingOps = it->second;
            SkASSERT(!rtPendingOps.fOpList.isEmpty());
            flushingOps.concat(std::move(rtPendingOps.fOpList));
            maxBufferItems += rtPendingOps.fMaxBufferItems;
        }
    }

    SkASSERT(flushingOps.isEmpty() == !maxBufferItems.fMaxPaths);
    if (flushingOps.isEmpty()) {
        return; // Still nothing to draw.
    }

    // Stage 2: allocate the shared per-flush GPU buffers. Any failure leaves
    // fPerFlushInstanceBuffer null, which onExecute() treats as "setup failed".
    fPerFlushIndexBuffer = GrCCPRPathProcessor::FindOrMakeIndexBuffer(onFlushRP);
    if (!fPerFlushIndexBuffer) {
        SkDebugf("WARNING: failed to allocate ccpr path index buffer.\n");
        return;
    }

    fPerFlushVertexBuffer = GrCCPRPathProcessor::FindOrMakeVertexBuffer(onFlushRP);
    if (!fPerFlushVertexBuffer) {
        SkDebugf("WARNING: failed to allocate ccpr path vertex buffer.\n");
        return;
    }

    GrCCPRCoverageOpsBuilder atlasOpsBuilder;
    if (!atlasOpsBuilder.init(onFlushRP, maxBufferItems)) {
        SkDebugf("WARNING: failed to allocate buffers for coverage ops. No paths will be drawn.\n");
        return;
    }

    fPerFlushInstanceBuffer = onFlushRP->makeBuffer(kVertex_GrBufferType,
                                                    maxBufferItems.fMaxPaths * sizeof(PathInstance));
    if (!fPerFlushInstanceBuffer) {
        SkDebugf("WARNING: failed to allocate path instance buffer. No paths will be drawn.\n");
        return;
    }

    PathInstance* pathInstanceData = static_cast<PathInstance*>(fPerFlushInstanceBuffer->map());
    SkASSERT(pathInstanceData);
    int pathInstanceIdx = 0;

    GrCCPRAtlas* atlas = nullptr;
    SkDEBUGCODE(int skippedPaths = 0;)

    // Stage 3: walk every draw of every flushing op, packing each path into an atlas
    // and emitting one PathInstance per surviving path.
    SkTInternalLList<DrawPathsOp>::Iter iter;
    iter.init(flushingOps, SkTInternalLList<DrawPathsOp>::Iter::kHead_IterStart);
    while (DrawPathsOp* op = iter.get()) {
        SkASSERT(op->fDebugInstanceCount > 0);
        SkASSERT(-1 == op->fBaseInstance);
        op->fBaseInstance = pathInstanceIdx;

        for (const DrawPathsOp::SingleDraw* draw = &op->fHeadDraw; draw; draw = draw->fNext) {
            // parsePath gives us two tight bounding boxes: one in device space, as well as a second
            // one rotated an additional 45 degrees. The path vertex shader uses these two bounding
            // boxes to generate an octagon that circumscribes the path.
            SkRect devBounds, devBounds45;
            atlasOpsBuilder.parsePath(draw->fScissorMode, draw->fMatrix, draw->fPath, &devBounds,
                                      &devBounds45);

            // Drop draws whose scissored bounds are entirely clipped out.
            SkRect clippedDevBounds = devBounds;
            if (ScissorMode::kScissored == draw->fScissorMode &&
                !clippedDevBounds.intersect(devBounds, SkRect::Make(draw->fClipBounds))) {
                SkDEBUGCODE(--op->fDebugInstanceCount);
                SkDEBUGCODE(++skippedPaths;)
                continue;
            }

            SkIRect clippedDevIBounds;
            clippedDevBounds.roundOut(&clippedDevIBounds);
            const int h = clippedDevIBounds.height(), w = clippedDevIBounds.width();

            SkIPoint16 atlasLocation;
            if (atlas && !atlas->addRect(w, h, &atlasLocation)) {
                // The atlas is out of room and can't grow any bigger.
                auto atlasOp = atlasOpsBuilder.createIntermediateOp(atlas->drawBounds());
                if (auto rtc = atlas->finalize(onFlushRP, std::move(atlasOp))) {
                    results->push_back(std::move(rtc));
                }
                // Close out this op's batch against the full atlas (if it has any
                // instances in it) before starting a fresh atlas.
                if (pathInstanceIdx > op->fBaseInstance) {
                    op->addAtlasBatch(atlas, pathInstanceIdx);
                }
                atlas = nullptr;
            }

            if (!atlas) {
                atlas = &fPerFlushAtlases.emplace_back(*onFlushRP->caps(), w, h);
                SkAssertResult(atlas->addRect(w, h, &atlasLocation));
            }

            const SkMatrix& m = draw->fMatrix;
            // Offset translates device-space coords into this path's atlas cell.
            const int16_t offsetX = atlasLocation.x() - static_cast<int16_t>(clippedDevIBounds.x()),
                          offsetY = atlasLocation.y() - static_cast<int16_t>(clippedDevIBounds.y());

            pathInstanceData[pathInstanceIdx++] = {
                devBounds,
                devBounds45,
                {{m.getScaleX(), m.getSkewY(), m.getSkewX(), m.getScaleY()}},
                {{m.getTranslateX(), m.getTranslateY()}},
                {{offsetX, offsetY}},
                draw->fColor
            };

            atlasOpsBuilder.saveParsedPath(clippedDevIBounds, offsetX, offsetY);
        }

        SkASSERT(pathInstanceIdx == op->fBaseInstance + op->fDebugInstanceCount);
        op->addAtlasBatch(atlas, pathInstanceIdx);

        iter.next();
    }

    SkASSERT(pathInstanceIdx == maxBufferItems.fMaxPaths - skippedPaths);
    fPerFlushInstanceBuffer->unmap();

    // Stage 4: finalize the last (still-open) atlas.
    // NOTE(review): 'atlas' is dereferenced unconditionally here. It looks like it could
    // still be nullptr if every draw above hit the clipped-out 'continue' path — confirm
    // whether that case is reachable, and guard if so.
    std::unique_ptr<GrDrawOp> atlasOp = atlasOpsBuilder.finalize(atlas->drawBounds());
    if (auto rtc = atlas->finalize(onFlushRP, std::move(atlasOp))) {
        results->push_back(std::move(rtc));
    }

    // Erase these last, once we are done accessing data from the SingleDraw allocators.
    for (int i = 0; i < numOpListIDs; ++i) {
        fRTPendingOpsMap.erase(opListIDs[i]);
    }
}
287
// Draws this op's path instances: one indexed-instanced mesh per atlas batch, reading
// from the per-flush buffers that preFlush() populated.
void DrawPathsOp::onExecute(GrOpFlushState* flushState) {
    SkASSERT(fCCPR->fFlushing);

    if (!fCCPR->fPerFlushInstanceBuffer) {
        return; // Setup failed.
    }

    GrPipeline::InitArgs args;
    args.fAppliedClip = flushState->drawOpArgs().fAppliedClip;
    args.fCaps = &flushState->caps();
    args.fFlags = fSRGBFlags;
    args.fProxy = flushState->drawOpArgs().fProxy;
    args.fDstProxy = flushState->drawOpArgs().fDstProxy;
    GrPipeline pipeline(args, std::move(fProcessors));

    int baseInstance = fBaseInstance;

    // Each batch covers the instance range [baseInstance, fEndInstanceIdx); the loop
    // increment advances baseInstance to the previous batch's end.
    for (int i = 0; i < fAtlasBatches.count(); baseInstance = fAtlasBatches[i++].fEndInstanceIdx) {
        const AtlasBatch& batch = fAtlasBatches[i];
        SkASSERT(batch.fEndInstanceIdx > baseInstance);

        if (!batch.fAtlas->textureProxy()) {
            continue; // Atlas failed to allocate.
        }

        GrCCPRPathProcessor coverProc(flushState->resourceProvider(), batch.fAtlas->textureProxy(),
                                      this->getFillType(),
                                      *flushState->gpu()->caps()->shaderCaps());

        GrMesh mesh(GrPrimitiveType::kTriangles);
        mesh.setIndexedInstanced(fCCPR->fPerFlushIndexBuffer.get(),
                                 GrCCPRPathProcessor::kPerInstanceIndexCount,
                                 fCCPR->fPerFlushInstanceBuffer.get(),
                                 batch.fEndInstanceIdx - baseInstance, baseInstance);
        mesh.setVertexData(fCCPR->fPerFlushVertexBuffer.get());

        flushState->commandBuffer()->draw(pipeline, coverProc, &mesh, nullptr, 1, this->bounds());
    }

    // All instances must have been consumed across the batches.
    SkASSERT(baseInstance == fBaseInstance + fDebugInstanceCount);
}
328
// Releases the per-flush GPU resources created in preFlush().
void GrCoverageCountingPathRenderer::postFlush() {
    SkASSERT(fFlushing);
    // Reset in reverse order of creation (atlases first, index buffer last).
    fPerFlushAtlases.reset();
    fPerFlushInstanceBuffer.reset();
    fPerFlushVertexBuffer.reset();
    fPerFlushIndexBuffer.reset();
    SkDEBUGCODE(fFlushing = false;)
}