Lift atlas clip FP creation out of GrClip::apply
Atlas clips always had a potential point of failure: If the SDC's
opsTask ever got closed between GrClip::apply and
GrOpsTask::addDrawOp, their mask would have gotten sent to the wrong
opsTask. It didn't _look_ like this could happen with the current
code, but the invariant was fragile: a seemingly unrelated future
change could easily have broken it.
This CL adds a "pathsForClipAtlas" array for GrClip::apply to fill out
instead of creating FPs. The SDC then generates the actual clip atlas
FPs once it knows exactly which opsTask they will belong in.
Bug: chromium:928984
Change-Id: I507ab13b2b5e8c3c3c1916d97611297dbbd8a522
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/389926
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Michael Ludwig <michaelludwig@google.com>
diff --git a/src/gpu/GrAppliedClip.h b/src/gpu/GrAppliedClip.h
index e8ccc2d..9a8f8b4 100644
--- a/src/gpu/GrAppliedClip.h
+++ b/src/gpu/GrAppliedClip.h
@@ -45,11 +45,9 @@
/**
* Intersects the applied clip with the provided rect. Returns false if the draw became empty.
- * 'clippedDrawBounds' will be intersected with 'irect'. This returns false if the clip becomes
- * empty or the draw no longer intersects the clip. In either case the draw can be skipped.
*/
- bool addScissor(const SkIRect& irect, SkRect* clippedDrawBounds) {
- return fScissorState.intersect(irect) && clippedDrawBounds->intersect(SkRect::Make(irect));
+ bool SK_WARN_UNUSED_RESULT addScissor(const SkIRect& scissor) {
+ return fScissorState.intersect(scissor);
}
void setScissor(const SkIRect& irect) {
diff --git a/src/gpu/GrClip.h b/src/gpu/GrClip.h
index 0d4f193..4c62bbe 100644
--- a/src/gpu/GrClip.h
+++ b/src/gpu/GrClip.h
@@ -56,14 +56,24 @@
/**
* This computes a GrAppliedClip from the clip which in turn can be used to build a GrPipeline.
* To determine the appropriate clipping implementation the GrClip subclass must know whether
- * the draw will enable HW AA or uses the stencil buffer. On input 'bounds' is a conservative
- * bounds of the draw that is to be clipped. If kClipped or kUnclipped is returned, the 'bounds'
- * will have been updated to be contained within the clip bounds (or the device's, for wide-open
- * clips). If kNoDraw is returned, 'bounds' and the applied clip are in an undetermined state
- * and should be ignored (and the draw should be skipped).
+ * the draw will enable HW AA or uses the stencil buffer.
+ *
+ * On input 'bounds' is a conservative bounds of the draw that is to be clipped. If kClipped or
+ * kUnclipped is returned, the 'bounds' will have been updated to be contained within the clip
+ * bounds (or the device's, for wide-open clips). If kNoDraw is returned, 'bounds' and the
+ * applied clip are in an undetermined state and should be ignored (and the draw should be
+ * skipped).
+ *
+ * If the applied clip uses atlas clip masks, they are not applied at this point. Instead they
+     * are returned in the 'pathsForClipAtlas' array. The caller is responsible for creating the
+     * atlas clip FPs and adding them to the GrAppliedClip once it knows exactly which opsTask
+     * the atlas will come from.
+ *
+     * If atlas clips are not supported, then 'pathsForClipAtlas' must be null.
*/
virtual Effect apply(GrRecordingContext*, GrSurfaceDrawContext*, GrAAType,
- bool hasUserStencilSettings, GrAppliedClip*, SkRect* bounds) const = 0;
+ bool hasUserStencilSettings, GrAppliedClip*,
+ SkRect* bounds, SkTArray<SkPath>* pathsForClipAtlas) const = 0;
/**
* Perform preliminary, conservative analysis on the draw bounds as if it were provided to
@@ -244,7 +254,8 @@
private:
Effect apply(GrRecordingContext*, GrSurfaceDrawContext* rtc, GrAAType aa,
- bool hasUserStencilSettings, GrAppliedClip* out, SkRect* bounds) const final {
+ bool hasUserStencilSettings, GrAppliedClip* out, SkRect* bounds,
+ SkTArray<SkPath>* /*pathsForClipAtlas*/) const final {
SkIRect pixelBounds = GetPixelIBounds(*bounds, GrAA(aa != GrAAType::kNone));
Effect effect = this->apply(&out->hardClip(), &pixelBounds);
bounds->intersect(SkRect::Make(pixelBounds));
diff --git a/src/gpu/GrClipStack.cpp b/src/gpu/GrClipStack.cpp
index 5461bcd..480780a 100644
--- a/src/gpu/GrClipStack.cpp
+++ b/src/gpu/GrClipStack.cpp
@@ -232,47 +232,6 @@
return GrFPFailure(std::move(fp));
}
-// TODO: Currently this only works with CCPR because CCPR owns and manages the clip atlas. The
-// high-level concept should be generalized to support any path renderer going into a shared atlas.
-static GrFPResult clip_atlas_fp(GrCoverageCountingPathRenderer* ccpr,
- uint32_t opsTaskID,
- const SkIRect& bounds,
- const GrClipStack::Element& e,
- SkPath* devicePath,
- const GrCaps& caps,
- std::unique_ptr<GrFragmentProcessor> fp) {
- // TODO: Currently the atlas manages device-space paths, so we have to transform by the ctm.
- // In the future, the atlas manager should see the local path and the ctm so that it can
- // cache across integer-only translations (internally, it already does this, just not exposed).
- if (devicePath->isEmpty()) {
- e.fShape.asPath(devicePath);
- devicePath->transform(e.fLocalToDevice);
- SkASSERT(!devicePath->isEmpty());
- }
-
- SkASSERT(!devicePath->isInverseFillType());
- if (e.fOp == SkClipOp::kIntersect) {
- return ccpr->makeClipProcessor(std::move(fp), opsTaskID, *devicePath, bounds, caps);
- } else {
- // Use kDstOut to convert the non-inverted mask alpha into (1-alpha), so the atlas only
- // ever renders non-inverse filled paths.
- // - When the input FP is null, this turns into "(1-sample(ccpr, 1).a) * input"
- // - When not null, it works out to
- // (1-sample(ccpr, input.rgb1).a) * sample(fp, input.rgb1) * input.a
- // - Since clips only care about the alpha channel, these are both equivalent to the
- // desired product of (1-ccpr) * fp * input.a.
- auto [success, atlasFP] = ccpr->makeClipProcessor(nullptr, opsTaskID, *devicePath, bounds,
- caps);
- if (!success) {
- // "Difference" draws that don't intersect the clip need to be drawn "wide open".
- return GrFPSuccess(nullptr);
- }
- return GrFPSuccess(GrBlendFragmentProcessor::Make(std::move(atlasFP), // src
- std::move(fp), // dst
- SkBlendMode::kDstOut));
- }
-}
-
static void draw_to_sw_mask(GrSWMaskHelper* helper, const GrClipStack::Element& e, bool clearMask) {
// If the first element to draw is an intersect, we clear to 0 and will draw it directly with
// coverage 1 (subsequent intersect elements will be inverse-filled and draw 0 outside).
@@ -1126,7 +1085,7 @@
// of the draws, with extra head room for more complex clips encountered in the wild.
//
// The mask stack increment size was chosen to be smaller since only 0.2% of the evaluated draw call
-// set ever used a mask (which includes stencil masks), or up to 0.3% when CCPR is disabled.
+// set ever used a mask (which includes stencil masks), or up to 0.3% when atlas clips are disabled.
static constexpr int kElementStackIncrement = 8;
static constexpr int kSaveStackIncrement = 8;
static constexpr int kMaskStackIncrement = 4;
@@ -1263,7 +1222,10 @@
GrClip::Effect GrClipStack::apply(GrRecordingContext* context, GrSurfaceDrawContext* rtc,
GrAAType aa, bool hasUserStencilSettings,
- GrAppliedClip* out, SkRect* bounds) const {
+ GrAppliedClip* out, SkRect* bounds,
+ SkTArray<SkPath>* pathsForClipAtlas) const {
+ SkASSERT(!pathsForClipAtlas || pathsForClipAtlas->empty());
+
// TODO: Once we no longer store SW masks, we don't need to sneak the provider in like this
if (!fProxyProvider) {
fProxyProvider = context->priv().proxyProvider();
@@ -1361,12 +1323,10 @@
GrWindowRectangles windowRects;
// Elements not represented as an analytic FP or skipped will be collected here and later
- // applied by using the stencil buffer, CCPR clip atlas, or a cached SW mask.
+ // applied by using the stencil buffer, clip atlas, or a cached SW mask.
SkSTArray<kNumStackMasks, const Element*> elementsForMask;
- SkSTArray<kNumStackMasks, const RawElement*> elementsForAtlas;
bool maskRequiresAA = false;
- auto* ccpr = context->priv().drawingManager()->getCoverageCountingPathRenderer();
int i = fElements.count();
for (const RawElement& e : fElements.ritems()) {
@@ -1420,7 +1380,7 @@
std::move(clipFP));
if (fullyApplied) {
remainingAnalyticFPs--;
- } else if (ccpr && e.aa() == GrAA::kYes) {
+ } else if (pathsForClipAtlas && e.aa() == GrAA::kYes) {
constexpr static int64_t kMaxClipPathArea =
GrCoverageCountingPathRenderer::kMaxClipPathArea;
SkIRect maskBounds;
@@ -1428,10 +1388,16 @@
maskBounds.height64() * maskBounds.width64() < kMaxClipPathArea) {
// While technically the element is turned into a mask, each atlas entry
// counts towards the FP complexity of the clip.
- // TODO - CCPR needs a stable ops task ID so we can't create FPs until
- // we know any other mask generation is finished. It also only works
- // with AA shapes, future atlas systems can improve on this.
- elementsForAtlas.push_back(&e);
+ if (e.devicePath()->isEmpty()) {
+ // Lazily fill in e.devicePath() if needed.
+ e.shape().asPath(e.devicePath());
+ e.devicePath()->transform(e.localToDevice());
+ SkASSERT(!e.devicePath()->isEmpty());
+ }
+ pathsForClipAtlas->push_back(*e.devicePath());
+ if (e.op() == SkClipOp::kDifference) {
+ pathsForClipAtlas->back().toggleInverseFillType();
+ }
remainingAnalyticFPs--;
fullyApplied = true;
}
@@ -1450,17 +1416,23 @@
if (!scissorIsNeeded) {
// More detailed analysis of the element shapes determined no clip is needed
- SkASSERT(elementsForMask.empty() && elementsForAtlas.empty() && !clipFP);
+ SkASSERT(elementsForMask.empty() && (!pathsForClipAtlas || pathsForClipAtlas->empty()) &&
+ !clipFP);
return Effect::kUnclipped;
}
// Fill out the GrAppliedClip with what we know so far, possibly with a tightened scissor
if (cs.op() == SkClipOp::kIntersect &&
- (!elementsForMask.empty() || !elementsForAtlas.empty())) {
+ (!elementsForMask.empty() || (pathsForClipAtlas && !pathsForClipAtlas->empty()))) {
SkAssertResult(scissorBounds.intersect(draw.outerBounds()));
}
if (!GrClip::IsInsideClip(scissorBounds, *bounds)) {
- out->hardClip().addScissor(scissorBounds, bounds);
+ if (!out->hardClip().addScissor(scissorBounds)) {
+ return Effect::kClippedOut;
+ }
+ }
+ if (!bounds->intersect(SkRect::Make(scissorBounds))) {
+ return Effect::kClippedOut;
}
if (!windowRects.empty()) {
out->hardClip().addWindowRectangles(windowRects, GrWindowRectsState::Mode::kExclusive);
@@ -1494,28 +1466,14 @@
}
}
- // Finish CCPR paths now that the render target's ops task is stable.
- if (!elementsForAtlas.empty()) {
- uint32_t opsTaskID = rtc->getOpsTask()->uniqueID();
- for (int i = 0; i < elementsForAtlas.count(); ++i) {
- SkASSERT(elementsForAtlas[i]->aa() == GrAA::kYes);
- bool success;
- std::tie(success, clipFP) = clip_atlas_fp(ccpr, opsTaskID, scissorBounds,
- elementsForAtlas[i]->asElement(),
- elementsForAtlas[i]->devicePath(), *caps,
- std::move(clipFP));
- if (!success) {
- return Effect::kClippedOut;
- }
- }
- }
-
if (clipFP) {
- // This will include all analytic FPs, all CCPR atlas FPs, and a SW mask FP.
+        // This will include all analytic FPs and a SW mask FP. The caller is responsible for
+        // adding atlas clip FPs once it knows exactly which opsTask the atlas will come from.
out->addCoverageFP(std::move(clipFP));
}
- SkASSERT(out->doesClip());
+ SkASSERT(scissorBounds.contains(*bounds));
+ SkASSERT(out->doesClip() || (pathsForClipAtlas && !pathsForClipAtlas->empty()));
return Effect::kClipped;
}
diff --git a/src/gpu/GrClipStack.h b/src/gpu/GrClipStack.h
index 9abb00e..a4ca8d4 100644
--- a/src/gpu/GrClipStack.h
+++ b/src/gpu/GrClipStack.h
@@ -72,8 +72,8 @@
// GrClip implementation
GrClip::Effect apply(GrRecordingContext*, GrSurfaceDrawContext*, GrAAType aa,
- bool hasUserStencilSettings,
- GrAppliedClip*, SkRect* bounds) const override;
+ bool hasUserStencilSettings, GrAppliedClip*, SkRect* bounds,
+ SkTArray<SkPath>* pathsForClipAtlas) const override;
GrClip::PreClipResult preApply(const SkRect& drawBounds, GrAA aa) const override;
SkIRect getConservativeBounds() const override;
diff --git a/src/gpu/GrClipStackClip.cpp b/src/gpu/GrClipStackClip.cpp
index 4edf950..2a2765f 100644
--- a/src/gpu/GrClipStackClip.cpp
+++ b/src/gpu/GrClipStackClip.cpp
@@ -188,7 +188,8 @@
GrClip::Effect GrClipStackClip::apply(GrRecordingContext* context,
GrSurfaceDrawContext* surfaceDrawContext,
GrAAType aa, bool hasUserStencilSettings,
- GrAppliedClip* out, SkRect* bounds) const {
+ GrAppliedClip* out, SkRect* bounds,
+ SkTArray<SkPath>* pathsForClipAtlas) const {
SkASSERT(surfaceDrawContext->width() == fDeviceSize.fWidth &&
surfaceDrawContext->height() == fDeviceSize.fHeight);
SkRect devBounds = SkRect::MakeIWH(fDeviceSize.fWidth, fDeviceSize.fHeight);
@@ -216,10 +217,9 @@
// We disable MSAA when avoiding stencil.
SkASSERT(!context->priv().caps()->avoidStencilBuffers());
}
- auto* ccpr = context->priv().drawingManager()->getCoverageCountingPathRenderer();
GrReducedClip reducedClip(*fStack, devBounds, context->priv().caps(), maxWindowRectangles,
- maxAnalyticElements, ccpr ? maxAnalyticElements : 0);
+ maxAnalyticElements, (pathsForClipAtlas) ? maxAnalyticElements : 0);
if (InitialState::kAllOut == reducedClip.initialState() &&
reducedClip.maskElements().isEmpty()) {
return Effect::kClippedOut;
@@ -227,9 +227,14 @@
Effect effect = Effect::kUnclipped;
if (reducedClip.hasScissor() && !GrClip::IsInsideClip(reducedClip.scissor(), devBounds)) {
- out->hardClip().addScissor(reducedClip.scissor(), bounds);
+ if (!out->hardClip().addScissor(reducedClip.scissor())) {
+ return Effect::kClippedOut;
+ }
effect = Effect::kClipped;
}
+ if (!bounds->intersect(SkRect::Make(reducedClip.scissor()))) {
+ return Effect::kClippedOut;
+ }
if (!reducedClip.windowRectangles().empty()) {
out->hardClip().addWindowRectangles(reducedClip.windowRectangles(),
@@ -247,10 +252,8 @@
// The opsTask ID must not be looked up until AFTER producing the clip mask (if any). That step
// can cause a flush or otherwise change which opstask our draw is going into.
- uint32_t opsTaskID = surfaceDrawContext->getOpsTask()->uniqueID();
- auto [success, clipFPs] = reducedClip.finishAndDetachAnalyticElements(context, *fMatrixProvider,
- ccpr, opsTaskID);
- if (success) {
+ if (auto clipFPs = reducedClip.finishAndDetachAnalyticElements(context, *fMatrixProvider,
+ pathsForClipAtlas)) {
out->addCoverageFP(std::move(clipFPs));
effect = Effect::kClipped;
} else {
diff --git a/src/gpu/GrClipStackClip.h b/src/gpu/GrClipStackClip.h
index ec4f1cc..360a72b 100644
--- a/src/gpu/GrClipStackClip.h
+++ b/src/gpu/GrClipStackClip.h
@@ -29,7 +29,8 @@
SkIRect getConservativeBounds() const final;
Effect apply(GrRecordingContext*, GrSurfaceDrawContext*, GrAAType aaType,
- bool hasUserStencilSettings, GrAppliedClip* out, SkRect* bounds) const final;
+ bool hasUserStencilSettings, GrAppliedClip* out, SkRect* bounds,
+ SkTArray<SkPath>* pathsForClipAtlas) const final;
PreClipResult preApply(const SkRect& drawBounds, GrAA aa) const final;
sk_sp<GrTextureProxy> testingOnly_createClipMask(GrRecordingContext*) const;
diff --git a/src/gpu/GrReducedClip.cpp b/src/gpu/GrReducedClip.cpp
index 4665b9c..dd947a8 100644
--- a/src/gpu/GrReducedClip.cpp
+++ b/src/gpu/GrReducedClip.cpp
@@ -902,23 +902,17 @@
return fCCPRClipPaths.size() + fNumAnalyticElements;
}
-GrFPResult GrReducedClip::finishAndDetachAnalyticElements(GrRecordingContext* context,
- const SkMatrixProvider& matrixProvider,
- GrCoverageCountingPathRenderer* ccpr,
- uint32_t opsTaskID) {
+std::unique_ptr<GrFragmentProcessor> GrReducedClip::finishAndDetachAnalyticElements(
+ GrRecordingContext* context, const SkMatrixProvider& matrixProvider,
+ SkTArray<SkPath>* pathsForClipAtlas) {
// Combine the analytic FP with any CCPR clip processors.
std::unique_ptr<GrFragmentProcessor> clipFP = std::move(fAnalyticFP);
fNumAnalyticElements = 0;
for (const SkPath& ccprClipPath : fCCPRClipPaths) {
- SkASSERT(ccpr);
+ SkASSERT(pathsForClipAtlas);
SkASSERT(fHasScissor);
- bool success;
- std::tie(success, clipFP) = ccpr->makeClipProcessor(std::move(clipFP), opsTaskID,
- ccprClipPath, fScissor, *fCaps);
- if (!success) {
- return GrFPFailure(nullptr);
- }
+ pathsForClipAtlas->push_back(ccprClipPath);
}
fCCPRClipPaths.reset();
@@ -935,5 +929,5 @@
}
// Compose the clip and shader FPs.
- return GrFPSuccess(GrFragmentProcessor::Compose(std::move(shaderFP), std::move(clipFP)));
+ return GrFragmentProcessor::Compose(std::move(shaderFP), std::move(clipFP));
}
diff --git a/src/gpu/GrReducedClip.h b/src/gpu/GrReducedClip.h
index 30cd601..0301f76 100644
--- a/src/gpu/GrReducedClip.h
+++ b/src/gpu/GrReducedClip.h
@@ -104,9 +104,9 @@
* the render target context, surface allocations, and even switching render targets (pre MDB)
* may cause flushes or otherwise change which opsTask the actual draw is going into.
*/
- GrFPResult finishAndDetachAnalyticElements(GrRecordingContext*, const SkMatrixProvider&
- matrixProvider, GrCoverageCountingPathRenderer*,
- uint32_t opsTaskID);
+ std::unique_ptr<GrFragmentProcessor> finishAndDetachAnalyticElements(
+ GrRecordingContext*, const SkMatrixProvider& matrixProvider,
+ SkTArray<SkPath>* pathsForClipAtlas);
private:
void walkStack(const SkClipStack&, const SkRect& queryBounds);
diff --git a/src/gpu/GrSurfaceDrawContext.cpp b/src/gpu/GrSurfaceDrawContext.cpp
index 5f8c49b..b53257a 100644
--- a/src/gpu/GrSurfaceDrawContext.cpp
+++ b/src/gpu/GrSurfaceDrawContext.cpp
@@ -48,6 +48,7 @@
#include "src/gpu/GrStyle.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/SkGr.h"
+#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/effects/GrBicubicEffect.h"
#include "src/gpu/effects/GrBlendFragmentProcessor.h"
#include "src/gpu/effects/GrRRectEffect.h"
@@ -1864,8 +1865,7 @@
}
}
-void GrSurfaceDrawContext::addDrawOp(const GrClip* clip,
- GrOp::Owner op,
+void GrSurfaceDrawContext::addDrawOp(const GrClip* clip, GrOp::Owner op,
const std::function<WillAddOpFn>& willAddFn) {
ASSERT_SINGLE_OWNER
if (fContext->abandoned()) {
@@ -1880,6 +1880,8 @@
SkRect bounds;
op_bounds(&bounds, op.get());
GrAppliedClip appliedClip(this->dimensions(), this->asSurfaceProxy()->backingStoreDimensions());
+ SkSTArray<4, SkPath> pathsForClipAtlas;
+ auto* ccpr = this->drawingManager()->getCoverageCountingPathRenderer();
GrDrawOp::FixedFunctionFlags fixedFunctionFlags = drawOp->fixedFunctionFlags();
bool usesHWAA = fixedFunctionFlags & GrDrawOp::FixedFunctionFlags::kUsesHWAA;
bool usesUserStencilBits = fixedFunctionFlags & GrDrawOp::FixedFunctionFlags::kUsesStencil;
@@ -1897,8 +1899,9 @@
} else {
aaType = op->hasAABloat() ? GrAAType::kCoverage : GrAAType::kNone;
}
- skipDraw = clip->apply(fContext, this, aaType, usesUserStencilBits,
- &appliedClip, &bounds) == GrClip::Effect::kClippedOut;
+ auto clipEffect = clip->apply(fContext, this, aaType, usesUserStencilBits, &appliedClip,
+ &bounds, (ccpr) ? &pathsForClipAtlas : nullptr);
+ skipDraw = (clipEffect == GrClip::Effect::kClippedOut);
} else {
// No clipping, so just clip the bounds against the logical render target dimensions
skipDraw = !bounds.intersect(this->asSurfaceProxy()->getBoundsRect());
@@ -1908,6 +1911,27 @@
return;
}
+ // Create atlas clip paths now that we know exactly which opsTask their atlas will come from.
+ if (!pathsForClipAtlas.empty()) {
+ std::unique_ptr<GrFragmentProcessor> clipFP;
+ if (appliedClip.hasCoverageFragmentProcessor()) {
+ clipFP = appliedClip.detachCoverageFragmentProcessor();
+ }
+ for (const SkPath& clipPath : pathsForClipAtlas) {
+ bool success;
+ // FIXME: ccpr shouldn't be given an opsTaskID until after the potential call to
+ // setupDstProxyView.
+ std::tie(success, clipFP) = ccpr->makeClipProcessor(std::move(clipFP),
+ this->getOpsTask()->uniqueID(),
+ clipPath, bounds.roundOut(),
+ *this->caps());
+ if (!success) {
+ return; // Clipped completely out.
+ }
+ }
+ appliedClip.addCoverageFP(std::move(clipFP));
+ }
+
bool willUseStencil = usesUserStencilBits || appliedClip.hasStencilClip();
SkASSERT(!willUseStencil || fNumStencilSamples > 0);
@@ -1934,12 +1958,17 @@
}
auto opsTask = this->getOpsTask();
+
if (willAddFn) {
willAddFn(op.get(), opsTask->uniqueID());
}
opsTask->addDrawOp(this->drawingManager(), std::move(op), analysis, std::move(appliedClip),
dstProxyView, GrTextureResolveManager(this->drawingManager()),
*this->caps());
+
+ // Our opsTask should not have changed or closed between the previous getOpsTask call and now.
+ SkASSERT(!opsTask->isClosed());
+ SkASSERT(this->getOpsTask() == opsTask);
}
bool GrSurfaceDrawContext::setupDstProxyView(const GrOp& op,
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 21e158c..12bd622 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -66,8 +66,9 @@
if (deviceSpacePath.isEmpty() ||
!SkIRect::Intersects(accessRect, deviceSpacePath.getBounds().roundOut())) {
- // "Intersect" draws that don't intersect the clip can be dropped.
- return deviceSpacePath.isInverseFillType() ? GrFPSuccess(nullptr) : GrFPFailure(nullptr);
+ // The accessRect never touches the path.
+ return deviceSpacePath.isInverseFillType() ? GrFPSuccess(std::move(inputFP)) // Wide open.
+ : GrFPFailure(nullptr); // Clipped out.
}
uint32_t key = deviceSpacePath.getGenerationID();