Add customData capability to the thread-safe proxy cache ...

and begin using it for cached SW-generated blur masks.

This is needed to begin mixing and matching HW & SW-generated blur
masks since they have different draw-rects.

It will also be useful if/when we add support for triangulated paths
to the thread-safe cache.

Bug: 1108408
Change-Id: I085ad1127dc2deb98b35d704b06e50b27c72fd1c
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/322657
Reviewed-by: Adlai Holler <adlai@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrBlurUtils.cpp b/src/gpu/GrBlurUtils.cpp
index 92e6717..c38314c 100644
--- a/src/gpu/GrBlurUtils.cpp
+++ b/src/gpu/GrBlurUtils.cpp
@@ -69,48 +69,83 @@
     SkMask::FreeImage(addr);
 }
 
+#ifdef SK_DEBUG
+// Brute force computation of the destination bounds of a SW filtered mask
+static SkIRect sw_calc_draw_rect(const SkMatrix& viewMatrix,
+                                 const GrStyledShape& shape,
+                                 const SkMaskFilter* filter,
+                                 const SkIRect& clipBounds) {
+    SkRect devBounds = shape.bounds();
+    viewMatrix.mapRect(&devBounds);
+
+    SkMask srcM, dstM;
+    if (!SkDraw::ComputeMaskBounds(devBounds, &clipBounds, filter, &viewMatrix, &srcM.fBounds)) {
+        return {};
+    }
+
+    srcM.fFormat = SkMask::kA8_Format;
+
+    if (!as_MFB(filter)->filterMask(&dstM, srcM, viewMatrix, nullptr)) {
+        return {};
+    }
+
+    return dstM.fBounds;
+}
+#endif
+
+// This stores the mapping from the unclipped, integerized, device-space shape bounds to
+// the filtered mask's draw rect.
+struct DrawRectData {
+    SkIVector fOffset;
+    SkISize   fSize;
+};
+
+static sk_sp<SkData> create_data(const SkIRect& drawRect, const SkIRect& origDevBounds) {
+
+    DrawRectData drawRectData { {drawRect.fLeft - origDevBounds.fLeft,
+                                 drawRect.fTop - origDevBounds.fTop},
+                                drawRect.size() };
+
+    return SkData::MakeWithCopy(&drawRectData, sizeof(drawRectData));
+}
+
+static SkIRect extract_draw_rect_from_data(SkData* data, const SkIRect& origDevBounds) {
+    auto drawRectData = static_cast<const DrawRectData*>(data->data());
+
+    return SkIRect::MakeXYWH(origDevBounds.fLeft + drawRectData->fOffset.fX,
+                             origDevBounds.fTop + drawRectData->fOffset.fY,
+                             drawRectData->fSize.fWidth,
+                             drawRectData->fSize.fHeight);
+}
+
 static GrSurfaceProxyView sw_create_filtered_mask(GrRecordingContext* rContext,
                                                   const SkMatrix& viewMatrix,
                                                   const GrStyledShape& shape,
                                                   const SkMaskFilter* filter,
+                                                  const SkIRect& unclippedDevShapeBounds,
                                                   const SkIRect& clipBounds,
                                                   SkIRect* drawRect,
-                                                  const GrUniqueKey& key) {
+                                                  GrUniqueKey* key) {
     SkASSERT(filter);
     SkASSERT(!shape.style().applies());
 
     auto threadSafeViewCache = rContext->priv().threadSafeViewCache();
 
     GrSurfaceProxyView filteredMaskView;
+    sk_sp<SkData> data;
 
-    if (key.isValid()) {
-        filteredMaskView = threadSafeViewCache->find(key);
+    if (key->isValid()) {
+        std::tie(filteredMaskView, data) = threadSafeViewCache->findWithData(*key);
     }
 
     if (filteredMaskView) {
+        SkASSERT(data);
         SkASSERT(kMaskOrigin == filteredMaskView.origin());
 
-        SkRect devBounds = shape.bounds();
-        viewMatrix.mapRect(&devBounds);
+        *drawRect = extract_draw_rect_from_data(data.get(), unclippedDevShapeBounds);
 
-        // Here we need to recompute the destination bounds in order to draw the mask correctly
-        SkMask srcM, dstM;
-        if (!SkDraw::ComputeMaskBounds(devBounds, &clipBounds, filter, &viewMatrix,
-                                       &srcM.fBounds)) {
-            return {};
-        }
-
-        srcM.fFormat = SkMask::kA8_Format;
-
-        if (!as_MFB(filter)->filterMask(&dstM, srcM, viewMatrix, nullptr)) {
-            return {};
-        }
-
-        // Unfortunately, we cannot double check that the computed bounds (i.e., dstM.fBounds)
-        // match the stored bounds of the mask bc the proxy may have been recreated and,
-        // when it is recreated, it just gets the bounds of the underlying GrTexture (which
-        // might be a loose fit).
-        *drawRect = dstM.fBounds;
+        SkDEBUGCODE(auto oldDrawRect = sw_calc_draw_rect(viewMatrix, shape, filter, clipBounds));
+        SkASSERT(*drawRect == oldDrawRect);
     } else {
         SkStrokeRec::InitStyle fillOrHairline = shape.style().isSimpleHairline()
                                                         ? SkStrokeRec::kHairline_InitStyle
@@ -162,8 +197,12 @@
 
         *drawRect = dstM.fBounds;
 
-        if (key.isValid()) {
-            filteredMaskView = threadSafeViewCache->add(key, filteredMaskView);
+        if (key->isValid()) {
+            key->setCustomData(create_data(*drawRect, unclippedDevShapeBounds));
+            std::tie(filteredMaskView, data) = threadSafeViewCache->addWithData(*key,
+                                                                                filteredMaskView);
+            // If we got a different view back from 'addWithData', it could have a different drawRect
+            *drawRect = extract_draw_rect_from_data(data.get(), unclippedDevShapeBounds);
         }
     }
 
@@ -256,7 +295,7 @@
 }
 
 // The key and clip-bounds are computed together because the caching decision can impact the
-// clip-bound.
+// clip-bound - since we only cache un-clipped masks, the clip can be removed entirely.
 // A 'false' return value indicates that the shape is known to be clipped away.
 static bool compute_key_and_clip_bounds(GrUniqueKey* maskKey,
                                         SkIRect* boundsForClip,
@@ -312,7 +351,8 @@
         SkScalar ky = viewMatrix.get(SkMatrix::kMSkewY);
         SkScalar tx = viewMatrix.get(SkMatrix::kMTransX);
         SkScalar ty = viewMatrix.get(SkMatrix::kMTransY);
-        // Allow 8 bits each in x and y of subpixel positioning.
+        // Allow 8 bits each in x and y of subpixel positioning. But note that we're allowing
+        // reuse for integer translations.
         SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
         SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
 
@@ -349,7 +389,7 @@
                                                   const SkIRect& unclippedDevShapeBounds,
                                                   const SkIRect& clipBounds,
                                                   SkIRect* maskRect,
-                                                  const GrUniqueKey& key) {
+                                                  GrUniqueKey* key) {
     GrSurfaceProxyView filteredMaskView;
 
     if (filter->canFilterMaskGPU(shape,
@@ -365,8 +405,8 @@
         GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
 
         // TODO: this path should also use the thread-safe proxy-view cache!
-        if (key.isValid()) {
-            filteredMaskView = find_filtered_mask(proxyProvider, key);
+        if (key->isValid()) {
+            filteredMaskView = find_filtered_mask(proxyProvider, *key);
         }
 
         if (!filteredMaskView) {
@@ -383,9 +423,12 @@
                                                          maskRTC->colorInfo().alphaType(),
                                                          viewMatrix,
                                                          *maskRect);
-                if (filteredMaskView && key.isValid()) {
+                if (filteredMaskView && key->isValid()) {
                     SkASSERT(filteredMaskView.asTextureProxy());
-                    proxyProvider->assignUniqueKeyToProxy(key, filteredMaskView.asTextureProxy());
+
+                    // This customData isn't being used yet
+                    key->setCustomData(create_data(*maskRect, unclippedDevShapeBounds));
+                    proxyProvider->assignUniqueKeyToProxy(*key, filteredMaskView.asTextureProxy());
                 }
             }
         }
@@ -460,7 +503,7 @@
         filteredMaskView = hw_create_filtered_mask(rContext, renderTargetContext,
                                                    viewMatrix, *shape, maskFilter,
                                                    unclippedDevShapeBounds, boundsForClip,
-                                                   &maskRect, maskKey);
+                                                   &maskRect, &maskKey);
         if (filteredMaskView) {
             if (draw_mask(renderTargetContext, clip, viewMatrix, maskRect, std::move(paint),
                           std::move(filteredMaskView))) {
@@ -474,8 +517,8 @@
     // Either HW mask rendering failed or we're in a DDL recording thread
     filteredMaskView = sw_create_filtered_mask(rContext,
                                                viewMatrix, *shape, maskFilter,
-                                               boundsForClip,
-                                               &maskRect, maskKey);
+                                               unclippedDevShapeBounds, boundsForClip,
+                                               &maskRect, &maskKey);
     if (filteredMaskView) {
         if (draw_mask(renderTargetContext, clip, viewMatrix, maskRect, std::move(paint),
                       std::move(filteredMaskView))) {
diff --git a/src/gpu/GrThreadSafeUniquelyKeyedProxyViewCache.cpp b/src/gpu/GrThreadSafeUniquelyKeyedProxyViewCache.cpp
index c819339..d467d2b 100644
--- a/src/gpu/GrThreadSafeUniquelyKeyedProxyViewCache.cpp
+++ b/src/gpu/GrThreadSafeUniquelyKeyedProxyViewCache.cpp
@@ -92,9 +92,8 @@
     }
 }
 
-GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::find(const GrUniqueKey& key) {
-    SkAutoSpinlock lock{fSpinLock};
-
+std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeUniquelyKeyedProxyViewCache::internalFind(
+                                                       const GrUniqueKey& key) {
     Entry* tmp = fUniquelyKeyedProxyViewMap.find(key);
     if (tmp) {
         SkASSERT(fUniquelyKeyedProxyViewList.isInList(tmp));
@@ -102,12 +101,27 @@
         tmp->fLastAccess = GrStdSteadyClock::now();
         fUniquelyKeyedProxyViewList.remove(tmp);
         fUniquelyKeyedProxyViewList.addToHead(tmp);
-        return tmp->fView;
+        return { tmp->fView, tmp->fKey.refCustomData() };
     }
 
     return {};
 }
 
+GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::find(const GrUniqueKey& key) {
+    SkAutoSpinlock lock{fSpinLock};
+
+    GrSurfaceProxyView view;
+    std::tie(view, std::ignore) = this->internalFind(key);
+    return view;
+}
+
+std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeUniquelyKeyedProxyViewCache::findWithData(
+                                                                        const GrUniqueKey& key) {
+    SkAutoSpinlock lock{fSpinLock};
+
+    return this->internalFind(key);
+}
+
 GrThreadSafeUniquelyKeyedProxyViewCache::Entry*
 GrThreadSafeUniquelyKeyedProxyViewCache::getEntry(const GrUniqueKey& key,
                                                   const GrSurfaceProxyView& view) {
@@ -141,7 +155,7 @@
     fFreeEntryList = dead;
 }
 
-GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::internalAdd(
+std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeUniquelyKeyedProxyViewCache::internalAdd(
                                                                 const GrUniqueKey& key,
                                                                 const GrSurfaceProxyView& view) {
     Entry* tmp = fUniquelyKeyedProxyViewMap.find(key);
@@ -151,13 +165,23 @@
         SkASSERT(fUniquelyKeyedProxyViewMap.find(key));
     }
 
-    return tmp->fView;
+    return { tmp->fView, tmp->fKey.refCustomData() };
 }
 
 GrSurfaceProxyView GrThreadSafeUniquelyKeyedProxyViewCache::add(const GrUniqueKey& key,
                                                                 const GrSurfaceProxyView& view) {
     SkAutoSpinlock lock{fSpinLock};
 
+    GrSurfaceProxyView newView;
+    std::tie(newView, std::ignore) = this->internalAdd(key, view);
+    return newView;
+}
+
+std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeUniquelyKeyedProxyViewCache::addWithData(
+                                                                const GrUniqueKey& key,
+                                                                const GrSurfaceProxyView& view) {
+    SkAutoSpinlock lock{fSpinLock};
+
     return this->internalAdd(key, view);
 }
 
@@ -165,14 +189,24 @@
                                                                       const GrSurfaceProxyView& v) {
     SkAutoSpinlock lock{fSpinLock};
 
-    Entry* tmp = fUniquelyKeyedProxyViewMap.find(key);
-    if (tmp) {
-        SkASSERT(fUniquelyKeyedProxyViewList.isInList(tmp));
-        // make the sought out entry the MRU
-        tmp->fLastAccess = GrStdSteadyClock::now();
-        fUniquelyKeyedProxyViewList.remove(tmp);
-        fUniquelyKeyedProxyViewList.addToHead(tmp);
-        return tmp->fView;
+    GrSurfaceProxyView view;
+    std::tie(view, std::ignore) = this->internalFind(key);
+    if (view) {
+        return view;
+    }
+
+    std::tie(view, std::ignore) = this->internalAdd(key, v);
+    return view;
+}
+
+std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeUniquelyKeyedProxyViewCache::findOrAddWithData(
+                                                                      const GrUniqueKey& key,
+                                                                      const GrSurfaceProxyView& v) {
+    SkAutoSpinlock lock{fSpinLock};
+
+    auto [view, data] = this->internalFind(key);
+    if (view) {
+        return { std::move(view), std::move(data) };
     }
 
     return this->internalAdd(key, v);
diff --git a/src/gpu/GrThreadSafeUniquelyKeyedProxyViewCache.h b/src/gpu/GrThreadSafeUniquelyKeyedProxyViewCache.h
index 596772a..f6f9d70 100644
--- a/src/gpu/GrThreadSafeUniquelyKeyedProxyViewCache.h
+++ b/src/gpu/GrThreadSafeUniquelyKeyedProxyViewCache.h
@@ -81,11 +81,17 @@
     void dropUniqueRefsOlderThan(GrStdSteadyClock::time_point purgeTime)  SK_EXCLUDES(fSpinLock);
 
     GrSurfaceProxyView find(const GrUniqueKey&)  SK_EXCLUDES(fSpinLock);
+    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findWithData(
+                                                      const GrUniqueKey&)  SK_EXCLUDES(fSpinLock);
 
     GrSurfaceProxyView add(const GrUniqueKey&, const GrSurfaceProxyView&)  SK_EXCLUDES(fSpinLock);
+    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> addWithData(
+                            const GrUniqueKey&, const GrSurfaceProxyView&)  SK_EXCLUDES(fSpinLock);
 
     GrSurfaceProxyView findOrAdd(const GrUniqueKey&,
                                  const GrSurfaceProxyView&)  SK_EXCLUDES(fSpinLock);
+    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findOrAddWithData(
+                            const GrUniqueKey&, const GrSurfaceProxyView&)  SK_EXCLUDES(fSpinLock);
 
     void remove(const GrUniqueKey&)  SK_EXCLUDES(fSpinLock);
 
@@ -108,8 +114,10 @@
     Entry* getEntry(const GrUniqueKey&, const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
     void recycleEntry(Entry*)  SK_REQUIRES(fSpinLock);
 
-    GrSurfaceProxyView internalAdd(const GrUniqueKey&,
-                                   const GrSurfaceProxyView&)  SK_REQUIRES(fSpinLock);
+    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalFind(
+                                                       const GrUniqueKey&)  SK_REQUIRES(fSpinLock);
+    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalAdd(
+                            const GrUniqueKey&, const GrSurfaceProxyView&)  SK_REQUIRES(fSpinLock);
 
     mutable SkSpinlock fSpinLock;