ccpr: Use SkMessageBus for thread-safe eviction of cache entries
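
GrCCPathCacheEntry listens for gen-ID changes on its GrShape, and the
onChange() callback can fire from any thread that modifies or deletes
the path. Previously each entry held a raw "fCacheWeakPtr" back to its
GrCCPathCache and evicted itself directly from onChange(), which is not
safe off the cache's owning thread.

Now onChange() posts a ref to the entry on an SkMessageBus instead.
Each GrCCPathCache owns an Inbox keyed by its GrContext's unique ID and
drains it in purgeAsNeeded() at the end of each flush, performing the
actual evictions at a safe time on the owning thread. To support this,
the hash table now holds a strong sk_sp reference to each entry,
evict() tolerates entries that were already evicted (e.g. for exceeding
kMaxCacheCount) before their message was polled, and the entry's stored
pathCacheUniqueID() replaces the contextUniqueID that
initAsStashedAtlas() and updateToCachedAtlas() used to take as a
parameter.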

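For reference, a minimal sketch of the SkMessageBus pattern this CL
adopts (the EvictMsg type and example() function below are illustrative
only, not part of this change):

    #include "SkMessageBus.h"
    #include "SkTArray.h"

    struct EvictMsg {
        uint32_t fBusUniqueID;  // Identifies the inbox this message targets.
    };

    // Instantiates the static message bus for EvictMsg (in exactly one .cpp).
    DECLARE_SKMESSAGEBUS_MESSAGE(EvictMsg);

    // SkMessageBus calls this overload to decide which inboxes receive a
    // posted message.
    static inline bool SkShouldPostMessageToBus(const EvictMsg& msg,
                                                uint32_t msgBusUniqueID) {
        return msg.fBusUniqueID == msgBusUniqueID;
    }

    void example(uint32_t uniqueID) {
        // The receiver owns an inbox keyed by its unique ID.
        SkMessageBus<EvictMsg>::Inbox inbox(uniqueID);

        // Post() is thread-safe and may be called from any thread.
        SkMessageBus<EvictMsg>::Post(EvictMsg{uniqueID});

        // The owner drains its inbox at a known-safe point, as
        // GrCCPathCache::purgeAsNeeded() now does at the end of a flush.
        SkTArray<EvictMsg> msgs;
        inbox.poll(&msgs);
    }
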
Bug: skia:
Change-Id: I87725b95761deb689333315ce681a4968d98190a
Reviewed-on: https://skia-review.googlesource.com/c/163511
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 7390b9f..1dae085 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -345,8 +345,7 @@
                 SkIVector newOffset;
                 GrCCAtlas* atlas =
                         resources->copyPathToCachedAtlas(*cacheEntry, doEvenOddFill, &newOffset);
-                cacheEntry->updateToCachedAtlas(atlas->getOrAssignUniqueKey(onFlushRP),
-                                                onFlushRP->contextUniqueID(), newOffset,
+                cacheEntry->updateToCachedAtlas(atlas->getOrAssignUniqueKey(onFlushRP), newOffset,
                                                 atlas->refOrMakeCachedAtlasInfo());
                 this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
                 resources->appendDrawPathInstance().set(*cacheEntry, draw.fCachedMaskShift,
@@ -392,9 +391,8 @@
 
                 const GrUniqueKey& atlasKey =
                         resources->nextAtlasToStash()->getOrAssignUniqueKey(onFlushRP);
-                cacheEntry->initAsStashedAtlas(atlasKey, onFlushRP->contextUniqueID(),
-                                               devToAtlasOffset, devBounds, devBounds45, devIBounds,
-                                               draw.fCachedMaskShift);
+                cacheEntry->initAsStashedAtlas(atlasKey, devToAtlasOffset, devBounds, devBounds45,
+                                               devIBounds, draw.fCachedMaskShift);
                 // Remember this atlas in case we encounter the path again during the same flush.
                 cacheEntry->setCurrFlushAtlas(atlas);
             }
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index 367caff..db85641 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -10,6 +10,13 @@
 #include "GrShape.h"
 #include "SkNx.h"
 
+DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCacheEntry>);
+
+static inline bool SkShouldPostMessageToBus(
+        const sk_sp<GrCCPathCacheEntry>& entry, uint32_t msgBusUniqueID) {
+    return entry->pathCacheUniqueID() == msgBusUniqueID;
+}
+
 // The maximum number of cache entries we allow in our own cache.
 static constexpr int kMaxCacheCount = 1 << 16;
 
@@ -94,14 +101,14 @@
 
 }
 
-inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* cache, const MaskTransform& m,
+inline GrCCPathCache::HashNode::HashNode(uint32_t pathCacheUniqueID, const MaskTransform& m,
                                          const GrShape& shape) {
     SkASSERT(shape.hasUnstyledKey());
 
     WriteStyledKey writeKey(shape);
     void* memory = ::operator new (sizeof(GrCCPathCacheEntry) +
                                    writeKey.allocCountU32() * sizeof(uint32_t));
-    fEntry = new (memory) GrCCPathCacheEntry(cache, m);
+    fEntry.reset(new (memory) GrCCPathCacheEntry(pathCacheUniqueID, m));
 
     // The shape key is a variable-length footer to the entry allocation.
     uint32_t* keyData = (uint32_t*)((char*)memory + sizeof(GrCCPathCacheEntry));
@@ -121,22 +128,17 @@
     return GrResourceKeyHash(&key.fData[1], key.fData[0]);
 }
 
-GrCCPathCache::HashNode::~HashNode() {
-    if (!fEntry) {
-        return;
+#ifdef SK_DEBUG
+GrCCPathCache::~GrCCPathCache() {
+    // Ensure the hash table and LRU list are still coherent.
+    int lruCount = 0;
+    for (const GrCCPathCacheEntry* entry : fLRU) {
+        SkASSERT(fHashTable.find(HashNode::GetKey(entry))->entry() == entry);
+        ++lruCount;
     }
-
-    // Finalize our eviction from the path cache.
-    SkASSERT(fEntry->fCacheWeakPtr);
-    fEntry->fCacheWeakPtr->fLRU.remove(fEntry);
-    fEntry->fCacheWeakPtr = nullptr;
-    fEntry->unref();
+    SkASSERT(fHashTable.count() == lruCount);
 }
-
-GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
-    this->~HashNode();
-    return *new (this) HashNode(std::move(node));
-}
+#endif
 
 sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
                                               CreateIfAbsent createIfAbsent) {
@@ -151,7 +153,7 @@
     GrCCPathCacheEntry* entry = nullptr;
     if (HashNode* node = fHashTable.find({keyData.get()})) {
         entry = node->entry();
-        SkASSERT(this == entry->fCacheWeakPtr);
+        SkASSERT(fLRU.isInList(entry));
         if (fuzzy_equals(m, entry->fMaskTransform)) {
             ++entry->fHitCount;  // The path was reused with a compatible matrix.
         } else if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
@@ -173,7 +175,7 @@
         if (fHashTable.count() >= kMaxCacheCount) {
             this->evict(fLRU.tail());  // We've exceeded our limit.
         }
-        entry = fHashTable.set(HashNode(this, m, shape))->entry();
+        entry = fHashTable.set(HashNode(fInvalidatedEntriesInbox.uniqueID(), m, shape))->entry();
         shape.addGenIDChangeListener(sk_ref_sp(entry));
         SkASSERT(fHashTable.count() <= kMaxCacheCount);
     } else {
@@ -184,33 +186,36 @@
     return sk_ref_sp(entry);
 }
 
-void GrCCPathCache::evict(const GrCCPathCacheEntry* entry) {
-    SkASSERT(entry);
-    SkASSERT(this == entry->fCacheWeakPtr);
-    SkASSERT(fLRU.isInList(entry));
-    SkASSERT(fHashTable.find(HashNode::GetKey(entry))->entry() == entry);
+void GrCCPathCache::evict(GrCCPathCacheEntry* entry) {
+    bool isInCache = entry->fNext || (fLRU.tail() == entry);
+    SkASSERT(isInCache == fLRU.isInList(entry));
+    if (isInCache) {
+        fLRU.remove(entry);
+        fHashTable.remove(HashNode::GetKey(entry));  // Do this last, as it might delete the entry.
+    }
+}
 
-    fHashTable.remove(HashNode::GetKey(entry));  // ~HashNode() handles the rest.
+void GrCCPathCache::purgeAsNeeded() {
+    SkTArray<sk_sp<GrCCPathCacheEntry>> invalidatedEntries;
+    fInvalidatedEntriesInbox.poll(&invalidatedEntries);
+    for (const sk_sp<GrCCPathCacheEntry>& entry : invalidatedEntries) {
+        this->evict(entry.get());
+    }
 }
 
 
 GrCCPathCacheEntry::~GrCCPathCacheEntry() {
-    SkASSERT(!fCacheWeakPtr);  // HashNode should have cleared our cache pointer.
     SkASSERT(!fCurrFlushAtlas);  // Client is required to reset fCurrFlushAtlas back to null.
-
     this->invalidateAtlas();
 }
 
-void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey, uint32_t contextUniqueID,
+void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
                                             const SkIVector& atlasOffset, const SkRect& devBounds,
                                             const SkRect& devBounds45, const SkIRect& devIBounds,
                                             const SkIVector& maskShift) {
-    SkASSERT(contextUniqueID != SK_InvalidUniqueID);
     SkASSERT(atlasKey.isValid());
     SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.
 
-    fContextUniqueID = contextUniqueID;
-
     fAtlasKey = atlasKey;
     fAtlasOffset = atlasOffset + maskShift;
     SkASSERT(!fCachedAtlasInfo);  // Otherwise they should have reused the cached atlas instead.
@@ -221,15 +226,12 @@
     fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
 }
 
-void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey, uint32_t contextUniqueID,
+void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
                                              const SkIVector& newAtlasOffset,
                                              sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
-    SkASSERT(contextUniqueID != SK_InvalidUniqueID);
     SkASSERT(atlasKey.isValid());
     SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.
 
-    fContextUniqueID = contextUniqueID;
-
     fAtlasKey = atlasKey;
     fAtlasOffset = newAtlasOffset;
 
@@ -245,8 +247,10 @@
         if (!fCachedAtlasInfo->fIsPurgedFromResourceCache &&
             fCachedAtlasInfo->fNumInvalidatedPathPixels >= fCachedAtlasInfo->fNumPathPixels / 2) {
             // Too many invalidated pixels: purge the atlas texture from the resource cache.
+            // The GrContext and CCPR path cache both share the same unique ID.
+            uint32_t contextUniqueID = fPathCacheUniqueID;
             SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
-                    GrUniqueKeyInvalidatedMessage(fAtlasKey, fContextUniqueID));
+                    GrUniqueKeyInvalidatedMessage(fAtlasKey, contextUniqueID));
             fCachedAtlasInfo->fIsPurgedFromResourceCache = true;
         }
     }
@@ -256,8 +260,6 @@
 }
 
 void GrCCPathCacheEntry::onChange() {
-    // Our corresponding path was modified or deleted. Evict ourselves.
-    if (fCacheWeakPtr) {
-        fCacheWeakPtr->evict(this);
-    }
+    // Post a thread-safe eviction message.
+    SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Post(sk_ref_sp(this));
 }
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index 7a552bb..e8ce928 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -24,13 +24,8 @@
  */
 class GrCCPathCache {
 public:
-#ifdef SK_DEBUG
-    ~GrCCPathCache() {
-        // Ensure the hash table and LRU list are still coherent.
-        fHashTable.reset();
-        SkASSERT(fLRU.isEmpty());
-    }
-#endif
+    GrCCPathCache(uint32_t contextUniqueID) : fInvalidatedEntriesInbox(contextUniqueID) {}
+    SkDEBUGCODE(~GrCCPathCache();)
 
     // Stores the components of a transformation that affect a path mask (i.e. everything but
     // integer translation). During construction, any integer portions of the matrix's translate are
@@ -55,7 +50,9 @@
     sk_sp<GrCCPathCacheEntry> find(const GrShape&, const MaskTransform&,
                                    CreateIfAbsent = CreateIfAbsent::kNo);
 
-    void evict(const GrCCPathCacheEntry*);
+    void evict(GrCCPathCacheEntry*);
+
+    void purgeAsNeeded();
 
 private:
     // Wrapper around a raw GrShape key that has a specialized operator==. Used by the hash table.
@@ -64,32 +61,38 @@
     };
     friend bool operator==(const HashKey&, const HashKey&);
 
-    // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It can only be
-    // moved, which guarantees the hash table holds exactly one reference for each entry. When a
-    // HashNode goes out of scope, it therefore means the entry has been evicted from the cache.
+    // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
+    // methods for SkTHash, and can only be moved. This guarantees the hash table holds exactly one
+    // reference for each entry.
     class HashNode : SkNoncopyable {
     public:
-        static HashKey GetKey(const HashNode& node) { return GetKey(node.fEntry); }
+        static HashKey GetKey(const HashNode& node) { return GetKey(node.entry()); }
         static HashKey GetKey(const GrCCPathCacheEntry*);
         static uint32_t Hash(HashKey);
 
         HashNode() = default;
-        HashNode(GrCCPathCache*, const MaskTransform&, const GrShape&);
-        HashNode(HashNode&& node) { fEntry = skstd::exchange(node.fEntry, nullptr); }
-        ~HashNode();  // Called when fEntry (if not null) has been evicted from the cache.
+        HashNode(uint32_t pathCacheUniqueID, const MaskTransform&, const GrShape&);
+        HashNode(HashNode&& node) : fEntry(std::move(node.fEntry)) {
+            SkASSERT(!node.fEntry);
+        }
 
-        HashNode& operator=(HashNode&&);
+        HashNode& operator=(HashNode&& node) {
+            fEntry = std::move(node.fEntry);
+            SkASSERT(!node.fEntry);
+            return *this;
+        }
 
-        GrCCPathCacheEntry* entry() const { return fEntry; }
+        GrCCPathCacheEntry* entry() const { return fEntry.get(); }
 
     private:
-        GrCCPathCacheEntry* fEntry = nullptr;
+        sk_sp<GrCCPathCacheEntry> fEntry;
         // The GrShape's unstyled key is stored as a variable-length footer to the 'fEntry'
         // allocation. GetKey provides access to it.
     };
 
     SkTHashTable<HashNode, HashKey> fHashTable;
     SkTInternalLList<GrCCPathCacheEntry> fLRU;
+    SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Inbox fInvalidatedEntriesInbox;
 };
 
 /**
@@ -102,6 +105,8 @@
 
     ~GrCCPathCacheEntry() override;
 
+    uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; }
+
     // The number of times this specific entry (path + matrix combination) has been pulled from
     // the path cache. As long as the caller does exactly one lookup per draw, this translates to
     // the number of times the path has been drawn with a compatible matrix.
@@ -121,15 +126,14 @@
     // Called once our path has been rendered into the mainline CCPR (fp16, coverage count) atlas.
     // The caller will stash this atlas texture away after drawing, and during the next flush,
     // recover it and attempt to copy any paths that got reused into permanent 8-bit atlases.
-    void initAsStashedAtlas(const GrUniqueKey& atlasKey, uint32_t contextUniqueID,
-                            const SkIVector& atlasOffset, const SkRect& devBounds,
-                            const SkRect& devBounds45, const SkIRect& devIBounds,
-                            const SkIVector& maskShift);
+    void initAsStashedAtlas(const GrUniqueKey& atlasKey, const SkIVector& atlasOffset,
+                            const SkRect& devBounds, const SkRect& devBounds45,
+                            const SkIRect& devIBounds, const SkIVector& maskShift);
 
     // Called once our path mask has been copied into a permanent, 8-bit atlas. This method points
     // the entry at the new atlas and updates the CachedAtlasInfo data.
-    void updateToCachedAtlas(const GrUniqueKey& atlasKey, uint32_t contextUniqueID,
-                             const SkIVector& newAtlasOffset, sk_sp<GrCCAtlas::CachedAtlasInfo>);
+    void updateToCachedAtlas(const GrUniqueKey& atlasKey, const SkIVector& newAtlasOffset,
+                             sk_sp<GrCCAtlas::CachedAtlasInfo>);
 
     const GrUniqueKey& atlasKey() const { return fAtlasKey; }
 
@@ -153,18 +157,19 @@
 private:
     using MaskTransform = GrCCPathCache::MaskTransform;
 
-    GrCCPathCacheEntry(GrCCPathCache* cache, const MaskTransform& m)
-            : fCacheWeakPtr(cache), fMaskTransform(m) {}
+    GrCCPathCacheEntry(uint32_t pathCacheUniqueID, const MaskTransform& maskTransform)
+            : fPathCacheUniqueID(pathCacheUniqueID), fMaskTransform(maskTransform) {
+        SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
+    }
 
     // Resets this entry back to not having an atlas, and purges its previous atlas texture from the
     // resource cache if needed.
     void invalidateAtlas();
 
-    // Called when our corresponding path is modified or deleted.
+    // Called when our corresponding path is modified or deleted. Not threadsafe.
     void onChange() override;
 
-    uint32_t fContextUniqueID;
-    GrCCPathCache* fCacheWeakPtr;  // Gets manually reset to null by the path cache upon eviction.
+    const uint32_t fPathCacheUniqueID;
     MaskTransform fMaskTransform;
     int fHitCount = 1;
 
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index f77bd75..50eb1eb 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -30,14 +30,16 @@
 }
 
 sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
-        const GrCaps& caps, AllowCaching allowCaching) {
-    return sk_sp<GrCoverageCountingPathRenderer>(
-            IsSupported(caps) ? new GrCoverageCountingPathRenderer(allowCaching) : nullptr);
+        const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
+    return sk_sp<GrCoverageCountingPathRenderer>((IsSupported(caps))
+            ? new GrCoverageCountingPathRenderer(allowCaching, contextUniqueID)
+            : nullptr);
 }
 
-GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching) {
+GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching,
+                                                               uint32_t contextUniqueID) {
     if (AllowCaching::kYes == allowCaching) {
-        fPathCache = skstd::make_unique<GrCCPathCache>();
+        fPathCache = skstd::make_unique<GrCCPathCache>(contextUniqueID);
     }
 }
 
@@ -306,6 +308,10 @@
         fFlushingPaths.reset();
     }
 
+    if (fPathCache) {
+        fPathCache->purgeAsNeeded();
+    }
+
     SkDEBUGCODE(fFlushing = false);
 }
 
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 19e42a9..1457e9b 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -34,7 +34,8 @@
         kYes = true
     };
 
-    static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching);
+    static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching,
+                                                                   uint32_t contextUniqueID);
 
     using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
 
@@ -82,7 +83,7 @@
                                    float* inflationRadius = nullptr);
 
 private:
-    GrCoverageCountingPathRenderer(AllowCaching);
+    GrCoverageCountingPathRenderer(AllowCaching, uint32_t contextUniqueID);
 
     // GrPathRenderer overrides.
     StencilSupport onGetStencilSupport(const GrShape&) const override {