Don't fail clip atlas instantiation when the access rect is out of bounds

An out-of-bounds access rect means the draw is entirely clipped out,
so we just don't create the FP in the first place. makeClipProcessor()
now returns a GrFPResult: a regular fill whose path misses the access
rect fails, and callers map that failure to Effect::kClippedOut, while
an inverse fill that misses it clips nothing, so the input FP passes
through as a success.
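
For reference, the call sites below lean on the (success, FP) pair
convention sketched here. This is only a rough sketch; the actual
GrFPResult, GrFPSuccess(), and GrFPFailure() definitions live in
GrFragmentProcessor.h:

  // Sketch of the result plumbing (assumes <tuple>, <memory>, and a
  // forward-declared GrFragmentProcessor).
  using GrFPResult = std::tuple<bool /*success*/,
                                std::unique_ptr<GrFragmentProcessor>>;
  static inline GrFPResult GrFPFailure(std::unique_ptr<GrFragmentProcessor> fp) {
      // Failure: callers drop the FP and treat the draw as clipped out.
      return {false, std::move(fp)};
  }
  static inline GrFPResult GrFPSuccess(std::unique_ptr<GrFragmentProcessor> fp) {
      // Success: a null FP simply means "no clipping needed".
      return {true, std::move(fp)};
  }

Call sites unpack the pair with structured bindings or std::tie() and
map a failure to Effect::kClippedOut, as seen in GrClipStack.cpp and
GrClipStackClip.cpp below.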

Change-Id: I6d8a2a2e18be07c8a1408437c4bcc3d9349b77a2
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/387057
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Adlai Holler <adlai@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrClipStack.cpp b/src/gpu/GrClipStack.cpp
index 166693d..257bee8 100644
--- a/src/gpu/GrClipStack.cpp
+++ b/src/gpu/GrClipStack.cpp
@@ -234,13 +234,13 @@
 
 // TODO: Currently this only works with CCPR because CCPR owns and manages the clip atlas. The
 // high-level concept should be generalized to support any path renderer going into a shared atlas.
-static std::unique_ptr<GrFragmentProcessor> clip_atlas_fp(GrCoverageCountingPathRenderer* ccpr,
-                                                          uint32_t opsTaskID,
-                                                          const SkIRect& bounds,
-                                                          const GrClipStack::Element& e,
-                                                          SkPath* devicePath,
-                                                          const GrCaps& caps,
-                                                          std::unique_ptr<GrFragmentProcessor> fp) {
+static GrFPResult clip_atlas_fp(GrCoverageCountingPathRenderer* ccpr,
+                                uint32_t opsTaskID,
+                                const SkIRect& bounds,
+                                const GrClipStack::Element& e,
+                                SkPath* devicePath,
+                                const GrCaps& caps,
+                                std::unique_ptr<GrFragmentProcessor> fp) {
     // TODO: Currently the atlas manages device-space paths, so we have to transform by the ctm.
     // In the future, the atlas manager should see the local path and the ctm so that it can
     // cache across integer-only translations (internally, it already does this, just not exposed).
@@ -261,10 +261,15 @@
         //       (1-sample(ccpr, input.rgb1).a) * sample(fp, input.rgb1) * input.a
         //  - Since clips only care about the alpha channel, these are both equivalent to the
         //    desired product of (1-ccpr) * fp * input.a.
-        return GrBlendFragmentProcessor::Make(
-                ccpr->makeClipProcessor(nullptr, opsTaskID, *devicePath, bounds, caps), // src
-                std::move(fp),                                                          // dst
-                SkBlendMode::kDstOut);
+        auto [success, atlasFP] = ccpr->makeClipProcessor(nullptr, opsTaskID, *devicePath, bounds,
+                                                          caps);
+        if (!success) {
+            // "Difference" draws that don't intersect the clip need to be drawn "wide open".
+            return GrFPSuccess(nullptr);
+        }
+        return GrFPSuccess(GrBlendFragmentProcessor::Make(std::move(atlasFP),  // src
+                                                          std::move(fp),       // dst
+                                                          SkBlendMode::kDstOut));
     }
 }
 
@@ -1494,8 +1499,14 @@
         uint32_t opsTaskID = rtc->getOpsTask()->uniqueID();
         for (int i = 0; i < elementsForAtlas.count(); ++i) {
             SkASSERT(elementsForAtlas[i]->aa() == GrAA::kYes);
-            clipFP = clip_atlas_fp(ccpr, opsTaskID, scissorBounds, elementsForAtlas[i]->asElement(),
-                                   elementsForAtlas[i]->devicePath(), *caps, std::move(clipFP));
+            bool success;
+            std::tie(success, clipFP) = clip_atlas_fp(ccpr, opsTaskID, scissorBounds,
+                                                      elementsForAtlas[i]->asElement(),
+                                                      elementsForAtlas[i]->devicePath(), *caps,
+                                                      std::move(clipFP));
+            if (!success) {
+                return Effect::kClippedOut;
+            }
         }
     }
 
diff --git a/src/gpu/GrClipStackClip.cpp b/src/gpu/GrClipStackClip.cpp
index 82d8c7f..4edf950 100644
--- a/src/gpu/GrClipStackClip.cpp
+++ b/src/gpu/GrClipStackClip.cpp
@@ -248,10 +248,13 @@
     // The opsTask ID must not be looked up until AFTER producing the clip mask (if any). That step
     // can cause a flush or otherwise change which opstask our draw is going into.
     uint32_t opsTaskID = surfaceDrawContext->getOpsTask()->uniqueID();
-    if (auto clipFPs = reducedClip.finishAndDetachAnalyticElements(context, *fMatrixProvider, ccpr,
-                                                                   opsTaskID)) {
+    auto [success, clipFPs] = reducedClip.finishAndDetachAnalyticElements(context, *fMatrixProvider,
+                                                                          ccpr, opsTaskID);
+    if (!success) {
+        effect = Effect::kClippedOut;
+    } else if (clipFPs) {
         out->addCoverageFP(std::move(clipFPs));
         effect = Effect::kClipped;
     }
 
     return effect;
diff --git a/src/gpu/GrReducedClip.cpp b/src/gpu/GrReducedClip.cpp
index 73819eb..7092bc8 100644
--- a/src/gpu/GrReducedClip.cpp
+++ b/src/gpu/GrReducedClip.cpp
@@ -902,9 +902,10 @@
     return fCCPRClipPaths.size() + fNumAnalyticElements;
 }
 
-std::unique_ptr<GrFragmentProcessor> GrReducedClip::finishAndDetachAnalyticElements(
-        GrRecordingContext* context, const SkMatrixProvider& matrixProvider,
-        GrCoverageCountingPathRenderer* ccpr, uint32_t opsTaskID) {
+GrFPResult GrReducedClip::finishAndDetachAnalyticElements(GrRecordingContext* context,
+                                                          const SkMatrixProvider& matrixProvider,
+                                                          GrCoverageCountingPathRenderer* ccpr,
+                                                          uint32_t opsTaskID) {
     // Combine the analytic FP with any CCPR clip processors.
     std::unique_ptr<GrFragmentProcessor> clipFP = std::move(fAnalyticFP);
     fNumAnalyticElements = 0;
@@ -912,8 +913,12 @@
     for (const SkPath& ccprClipPath : fCCPRClipPaths) {
         SkASSERT(ccpr);
         SkASSERT(fHasScissor);
-        clipFP = ccpr->makeClipProcessor(std::move(clipFP), opsTaskID, ccprClipPath,
-                                         fScissor, *fCaps);
+        bool success;
+        std::tie(success, clipFP) = ccpr->makeClipProcessor(std::move(clipFP), opsTaskID,
+                                                            ccprClipPath, fScissor, *fCaps);
+        if (!success) {
+            return GrFPFailure(nullptr);
+        }
     }
     fCCPRClipPaths.reset();
 
@@ -930,5 +935,5 @@
     }
 
     // Compose the clip and shader FPs.
-    return GrFragmentProcessor::Compose(std::move(shaderFP), std::move(clipFP));
+    return GrFPSuccess(GrFragmentProcessor::Compose(std::move(shaderFP), std::move(clipFP)));
 }
diff --git a/src/gpu/GrReducedClip.h b/src/gpu/GrReducedClip.h
index e11e0ed..30cd601 100644
--- a/src/gpu/GrReducedClip.h
+++ b/src/gpu/GrReducedClip.h
@@ -104,9 +104,10 @@
      * the render target context, surface allocations, and even switching render targets (pre MDB)
      * may cause flushes or otherwise change which opsTask the actual draw is going into.
      */
-    std::unique_ptr<GrFragmentProcessor> finishAndDetachAnalyticElements(
-            GrRecordingContext*, const SkMatrixProvider& matrixProvider,
-            GrCoverageCountingPathRenderer*, uint32_t opsTaskID);
+    GrFPResult finishAndDetachAnalyticElements(GrRecordingContext*,
+                                               const SkMatrixProvider& matrixProvider,
+                                               GrCoverageCountingPathRenderer*,
+                                               uint32_t opsTaskID);
 
 private:
     void walkStack(const SkClipStack&, const SkRect& queryBounds);
diff --git a/src/gpu/ccpr/GrCCClipPath.cpp b/src/gpu/ccpr/GrCCClipPath.cpp
index 510d3da..d2845d1 100644
--- a/src/gpu/ccpr/GrCCClipPath.cpp
+++ b/src/gpu/ccpr/GrCCClipPath.cpp
@@ -15,6 +15,7 @@
 
 void GrCCClipPath::init(const SkPath& deviceSpacePath, const SkIRect& accessRect,
                         const GrCaps& caps) {
+    SkASSERT(!deviceSpacePath.isEmpty());
     SkASSERT(!this->isInitialized());
 
     fAtlasLazyProxy = GrCCAtlas::MakeLazyAtlasProxy(
@@ -45,6 +46,7 @@
     fDeviceSpacePath = deviceSpacePath;
     fDeviceSpacePath.getBounds().roundOut(&fPathDevIBounds);
     fAccessRect = accessRect;
+    SkASSERT(SkIRect::Intersects(fAccessRect, fPathDevIBounds));
 }
 
 void GrCCClipPath::accountForOwnPath(GrCCAtlas::Specs* specs) const {
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.cpp b/src/gpu/ccpr/GrCCPerFlushResources.cpp
index a606476..19c3202 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.cpp
+++ b/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -24,20 +24,15 @@
 const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
         GrOnFlushResourceProvider* onFlushRP, const SkIRect& clipIBounds, const SkPath& devPath,
         const SkIRect& devPathIBounds, GrFillRule fillRule, SkIVector* devToAtlasOffset) {
-    if (devPath.isEmpty()) {
-        return nullptr;
-    }
-
+    SkASSERT(!devPath.isEmpty());
     GrScissorTest enableScissorInAtlas;
     SkIRect clippedPathIBounds;
     if (clipIBounds.contains(devPathIBounds)) {
         clippedPathIBounds = devPathIBounds;
         enableScissorInAtlas = GrScissorTest::kDisabled;
-    } else if (clippedPathIBounds.intersect(clipIBounds, devPathIBounds)) {
-        enableScissorInAtlas = GrScissorTest::kEnabled;
     } else {
-        // The clip and path bounds do not intersect. Draw nothing.
-        return nullptr;
+        SkAssertResult(clippedPathIBounds.intersect(clipIBounds, devPathIBounds));
+        enableScissorInAtlas = GrScissorTest::kEnabled;
     }
 
     this->placeRenderedPathInAtlas(onFlushRP, clippedPathIBounds, enableScissorInAtlas,
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index f44502f..ec08d69 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -51,7 +51,7 @@
     return it->second.get();
 }
 
-std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
+GrFPResult GrCoverageCountingPathRenderer::makeClipProcessor(
         std::unique_ptr<GrFragmentProcessor> inputFP, uint32_t opsTaskID,
         const SkPath& deviceSpacePath, const SkIRect& accessRect, const GrCaps& caps) {
 #ifdef SK_DEBUG
@@ -64,6 +64,14 @@
     }
 #endif
 
+    if (deviceSpacePath.isEmpty() ||
+        !SkIRect::Intersects(accessRect, deviceSpacePath.getBounds().roundOut())) {
+        // "Intersect" draws that don't intersect the clip can be dropped.
+        return deviceSpacePath.isInverseFillType() ? GrFPSuccess(nullptr) : GrFPFailure(nullptr);
+    }
+
     uint32_t key = deviceSpacePath.getGenerationID();
     key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
     GrCCClipPath& clipPath =
@@ -77,8 +85,8 @@
 
     auto mustCheckBounds = GrCCClipProcessor::MustCheckBounds(
             !clipPath.pathDevIBounds().contains(accessRect));
-    return std::make_unique<GrCCClipProcessor>(std::move(inputFP), caps, &clipPath,
-                                               mustCheckBounds);
+    return GrFPSuccess(std::make_unique<GrCCClipProcessor>(std::move(inputFP), caps, &clipPath,
+                                                           mustCheckBounds));
 }
 
 void GrCoverageCountingPathRenderer::preFlush(
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 0742151..3d577da 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -51,9 +51,9 @@
     // Large clip paths should consider a different method, like MSAA stencil.
     constexpr static int64_t kMaxClipPathArea = 256 * 256;
 
-    std::unique_ptr<GrFragmentProcessor> makeClipProcessor(
-            std::unique_ptr<GrFragmentProcessor> inputFP, uint32_t opsTaskID,
-            const SkPath& deviceSpacePath, const SkIRect& accessRect, const GrCaps& caps);
+    GrFPResult makeClipProcessor(std::unique_ptr<GrFragmentProcessor> inputFP, uint32_t opsTaskID,
+                                 const SkPath& deviceSpacePath, const SkIRect& accessRect,
+                                 const GrCaps& caps);
 
     // GrOnFlushCallbackObject overrides.
     void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> taskIDs) override;
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
index e07ebec..5e080fc 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
@@ -17,8 +17,8 @@
     return nullptr;
 }
 
-std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
+GrFPResult GrCoverageCountingPathRenderer::makeClipProcessor(
         std::unique_ptr<GrFragmentProcessor> inputFP, uint32_t opsTaskID,
         const SkPath& deviceSpacePath, const SkIRect& accessRect, const GrCaps& caps) {
-    return nullptr;
+    return GrFPFailure(nullptr);
 }