ccpr: Use SkMessageBus for thread-safe eviction of cache entries
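
Previously, SkMessageBus required each message type to implement a
bool Message::shouldSend(uint32_t inboxID) const member. This CL
replaces that requirement with a free function,

    bool SkShouldPostMessageToBus(const Message&, uint32_t msgBusUniqueID)

defined once per message type, and adds a uniqueID() accessor to
SkMessageBus::Inbox. The existing messages (GrUniqueKeyInvalidatedMessage,
GrGpuResourceFreedMessage, SkResourceCache::PurgeSharedIDMessage, and
GrTextBlobCache::PurgeBlobMessage) are converted to the new scheme.

The motivation is CCPR: GrCCPathCacheEntry::onChange() used to evict the
entry directly through a raw back-pointer to its GrCCPathCache. Since the
entry is registered as a GenID change listener on its path, onChange() can
fire on a thread that does not own the cache. The entry now posts an
sk_sp<GrCCPathCacheEntry> message addressed to its path cache's unique ID
(which it shares with the owning GrContext), and
GrCCPathCache::purgeAsNeeded() polls the inbox and performs the actual
evictions at the end of flush. The hash table now holds its entries via
sk_sp, so an entry that has been posted for eviction stays alive until it
is actually removed.

For reference, opting a message type into the bus now looks roughly like
the following sketch (FooMessage and myContextID are hypothetical names;
the pattern follows the one used in MessageBusTest.cpp):

    #include "SkMessageBus.h"

    struct FooMessage {
        uint32_t fContextID;  // unique ID of the targeted GrContext
    };

    // Replaces the old Message::shouldSend() member: only inboxes whose
    // unique ID matches the message's context ID receive the message.
    static inline bool SkShouldPostMessageToBus(const FooMessage& msg,
                                                uint32_t msgBusUniqueID) {
        return msg.fContextID == msgBusUniqueID;
    }

    // Instantiates the bus singleton for FooMessage (in exactly one .cpp).
    DECLARE_SKMESSAGEBUS_MESSAGE(FooMessage)

    // Posting and receiving:
    SkMessageBus<FooMessage>::Inbox inbox(myContextID);
    SkMessageBus<FooMessage>::Post({myContextID});

    SkTArray<FooMessage> received;
    inbox.poll(&received);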

Bug: skia:
Change-Id: I87725b95761deb689333315ce681a4968d98190a
Reviewed-on: https://skia-review.googlesource.com/c/163511
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
diff --git a/include/gpu/GrResourceKey.h b/include/gpu/GrResourceKey.h
index a125006..fb1b892 100644
--- a/include/gpu/GrResourceKey.h
+++ b/include/gpu/GrResourceKey.h
@@ -348,12 +348,16 @@
     GrUniqueKeyInvalidatedMessage& operator=(const GrUniqueKeyInvalidatedMessage&) = default;
 
     const GrUniqueKey& key() const { return fKey; }
-
-    bool shouldSend(uint32_t inboxID) const { return fContextID == inboxID; }
+    uint32_t contextID() const { return fContextID; }
 
 private:
     GrUniqueKey fKey;
     uint32_t fContextID;
 };
 
+static inline bool SkShouldPostMessageToBus(
+        const GrUniqueKeyInvalidatedMessage& msg, uint32_t msgBusUniqueID) {
+    return msg.contextID() == msgBusUniqueID;
+}
+
 #endif
diff --git a/include/private/SkMessageBus.h b/include/private/SkMessageBus.h
index 01145ec..649edf6 100644
--- a/include/private/SkMessageBus.h
+++ b/include/private/SkMessageBus.h
@@ -16,15 +16,18 @@
 #include "SkTypes.h"
 
 /**
- * Message must implement bool Message::shouldSend(uint32_t inboxID) const. Perhaps someday we
- * can use std::experimental::is_detected to avoid this requirement by sending to all inboxes when
- * the method is not detected on Message.
+ * The following free function must be overloaded for type 'Message':
+ *
+ *     bool SkShouldPostMessageToBus(const Message&, uint32_t msgBusUniqueID)
+ *
+ * We may eventually want to provide a default implementation, which would avoid this
+ * requirement by sending to all inboxes when no overload exists for type 'Message'.
  */
 template <typename Message>
 class SkMessageBus : SkNoncopyable {
 public:
     // Post a message to be received by Inboxes for this Message type. Checks
-    // Message::shouldSend() for each inbox. Threadsafe.
+    // SkShouldPostMessageToBus() for each inbox. Threadsafe.
     static void Post(const Message& m);
 
     class Inbox {
@@ -32,6 +35,8 @@
         Inbox(uint32_t uniqueID = SK_InvalidUniqueID);
         ~Inbox();
 
+        uint32_t uniqueID() const { return fUniqueID; }
+
         // Overwrite out with all the messages we've received since the last call.  Threadsafe.
         void poll(SkTArray<Message>* out);
 
@@ -111,7 +116,7 @@
     SkMessageBus<Message>* bus = SkMessageBus<Message>::Get();
     SkAutoMutexAcquire lock(bus->fInboxesMutex);
     for (int i = 0; i < bus->fInboxes.count(); i++) {
-        if (m.shouldSend(bus->fInboxes[i]->fUniqueID)) {
+        if (SkShouldPostMessageToBus(m, bus->fInboxes[i]->fUniqueID)) {
             bus->fInboxes[i]->receive(m);
         }
     }
diff --git a/src/core/SkResourceCache.cpp b/src/core/SkResourceCache.cpp
index e016d23..b992c85 100644
--- a/src/core/SkResourceCache.cpp
+++ b/src/core/SkResourceCache.cpp
@@ -20,6 +20,13 @@
 
 DECLARE_SKMESSAGEBUS_MESSAGE(SkResourceCache::PurgeSharedIDMessage)
 
+static inline bool SkShouldPostMessageToBus(
+        const SkResourceCache::PurgeSharedIDMessage&, uint32_t) {
+    // SkResourceCache is typically used as a singleton and we don't label Inboxes, so all
+    // messages go to all inboxes.
+    return true;
+}
+
 // This can be defined by the caller's build system
 //#define SK_USE_DISCARDABLE_SCALEDIMAGECACHE
 
diff --git a/src/core/SkResourceCache.h b/src/core/SkResourceCache.h
index 4794669..c805852 100644
--- a/src/core/SkResourceCache.h
+++ b/src/core/SkResourceCache.h
@@ -112,10 +112,7 @@
     // Used with SkMessageBus
     struct PurgeSharedIDMessage {
         PurgeSharedIDMessage(uint64_t sharedID) : fSharedID(sharedID) {}
-        // SkResourceCache is typically used as a singleton and we don't label Inboxes so all
-        // messages go to all inboxes.
-        bool shouldSend(uint32_t inboxID) const { return true; }
-        uint64_t    fSharedID;
+        uint64_t fSharedID;
     };
 
     typedef const Rec* ID;
diff --git a/src/gpu/GrPathRendererChain.cpp b/src/gpu/GrPathRendererChain.cpp
index 60c935a..93741da 100644
--- a/src/gpu/GrPathRendererChain.cpp
+++ b/src/gpu/GrPathRendererChain.cpp
@@ -37,7 +37,8 @@
     if (options.fGpuPathRenderers & GpuPathRenderers::kCoverageCounting) {
         using AllowCaching = GrCoverageCountingPathRenderer::AllowCaching;
         if (auto ccpr = GrCoverageCountingPathRenderer::CreateIfSupported(
-                                caps, AllowCaching(options.fAllowPathMaskCaching))) {
+                                caps, AllowCaching(options.fAllowPathMaskCaching),
+                                context->uniqueID())) {
             fCoverageCountingPathRenderer = ccpr.get();
             context->contextPriv().addOnFlushCallbackObject(fCoverageCountingPathRenderer);
             fChain.push_back(std::move(ccpr));
diff --git a/src/gpu/GrResourceCache.h b/src/gpu/GrResourceCache.h
index 1d3598e..16accbb 100644
--- a/src/gpu/GrResourceCache.h
+++ b/src/gpu/GrResourceCache.h
@@ -27,12 +27,14 @@
 struct GrGpuResourceFreedMessage {
     GrGpuResource* fResource;
     uint32_t fOwningUniqueID;
-    bool shouldSend(uint32_t inboxID) const {
-        // The inbox's ID is the unique ID of the owning GrContext.
-        return inboxID == fOwningUniqueID;
-    }
 };
 
+static inline bool SkShouldPostMessageToBus(
+        const GrGpuResourceFreedMessage& msg, uint32_t msgBusUniqueID) {
+    // The inbox's ID is the unique ID of the owning GrContext.
+    return msgBusUniqueID == msg.fOwningUniqueID;
+}
+
 /**
  * Manages the lifetime of all GrGpuResource instances.
  *
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 7390b9f..1dae085 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -345,8 +345,7 @@
                 SkIVector newOffset;
                 GrCCAtlas* atlas =
                         resources->copyPathToCachedAtlas(*cacheEntry, doEvenOddFill, &newOffset);
-                cacheEntry->updateToCachedAtlas(atlas->getOrAssignUniqueKey(onFlushRP),
-                                                onFlushRP->contextUniqueID(), newOffset,
+                cacheEntry->updateToCachedAtlas(atlas->getOrAssignUniqueKey(onFlushRP), newOffset,
                                                 atlas->refOrMakeCachedAtlasInfo());
                 this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
                 resources->appendDrawPathInstance().set(*cacheEntry, draw.fCachedMaskShift,
@@ -392,9 +391,8 @@
 
                 const GrUniqueKey& atlasKey =
                         resources->nextAtlasToStash()->getOrAssignUniqueKey(onFlushRP);
-                cacheEntry->initAsStashedAtlas(atlasKey, onFlushRP->contextUniqueID(),
-                                               devToAtlasOffset, devBounds, devBounds45, devIBounds,
-                                               draw.fCachedMaskShift);
+                cacheEntry->initAsStashedAtlas(atlasKey, devToAtlasOffset, devBounds, devBounds45,
+                                               devIBounds, draw.fCachedMaskShift);
                 // Remember this atlas in case we encounter the path again during the same flush.
                 cacheEntry->setCurrFlushAtlas(atlas);
             }
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index 367caff..db85641 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -10,6 +10,13 @@
 #include "GrShape.h"
 #include "SkNx.h"
 
+DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCacheEntry>);
+
+static inline bool SkShouldPostMessageToBus(
+        const sk_sp<GrCCPathCacheEntry>& entry, uint32_t msgBusUniqueID) {
+    return entry->pathCacheUniqueID() == msgBusUniqueID;
+}
+
 // The maximum number of cache entries we allow in our own cache.
 static constexpr int kMaxCacheCount = 1 << 16;
 
@@ -94,14 +101,14 @@
 
 }
 
-inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* cache, const MaskTransform& m,
+inline GrCCPathCache::HashNode::HashNode(uint32_t pathCacheUniqueID, const MaskTransform& m,
                                          const GrShape& shape) {
     SkASSERT(shape.hasUnstyledKey());
 
     WriteStyledKey writeKey(shape);
     void* memory = ::operator new (sizeof(GrCCPathCacheEntry) +
                                    writeKey.allocCountU32() * sizeof(uint32_t));
-    fEntry = new (memory) GrCCPathCacheEntry(cache, m);
+    fEntry.reset(new (memory) GrCCPathCacheEntry(pathCacheUniqueID, m));
 
     // The shape key is a variable-length footer to the entry allocation.
     uint32_t* keyData = (uint32_t*)((char*)memory + sizeof(GrCCPathCacheEntry));
@@ -121,22 +128,17 @@
     return GrResourceKeyHash(&key.fData[1], key.fData[0]);
 }
 
-GrCCPathCache::HashNode::~HashNode() {
-    if (!fEntry) {
-        return;
+#ifdef SK_DEBUG
+GrCCPathCache::~GrCCPathCache() {
+    // Ensure the hash table and LRU list are still coherent.
+    int lruCount = 0;
+    for (const GrCCPathCacheEntry* entry : fLRU) {
+        SkASSERT(fHashTable.find(HashNode::GetKey(entry))->entry() == entry);
+        ++lruCount;
     }
-
-    // Finalize our eviction from the path cache.
-    SkASSERT(fEntry->fCacheWeakPtr);
-    fEntry->fCacheWeakPtr->fLRU.remove(fEntry);
-    fEntry->fCacheWeakPtr = nullptr;
-    fEntry->unref();
+    SkASSERT(fHashTable.count() == lruCount);
 }
-
-GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
-    this->~HashNode();
-    return *new (this) HashNode(std::move(node));
-}
+#endif
 
 sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
                                               CreateIfAbsent createIfAbsent) {
@@ -151,7 +153,7 @@
     GrCCPathCacheEntry* entry = nullptr;
     if (HashNode* node = fHashTable.find({keyData.get()})) {
         entry = node->entry();
-        SkASSERT(this == entry->fCacheWeakPtr);
+        SkASSERT(fLRU.isInList(entry));
         if (fuzzy_equals(m, entry->fMaskTransform)) {
             ++entry->fHitCount;  // The path was reused with a compatible matrix.
         } else if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
@@ -173,7 +175,7 @@
         if (fHashTable.count() >= kMaxCacheCount) {
             this->evict(fLRU.tail());  // We've exceeded our limit.
         }
-        entry = fHashTable.set(HashNode(this, m, shape))->entry();
+        entry = fHashTable.set(HashNode(fInvalidatedEntriesInbox.uniqueID(), m, shape))->entry();
         shape.addGenIDChangeListener(sk_ref_sp(entry));
         SkASSERT(fHashTable.count() <= kMaxCacheCount);
     } else {
@@ -184,33 +186,36 @@
     return sk_ref_sp(entry);
 }
 
-void GrCCPathCache::evict(const GrCCPathCacheEntry* entry) {
-    SkASSERT(entry);
-    SkASSERT(this == entry->fCacheWeakPtr);
-    SkASSERT(fLRU.isInList(entry));
-    SkASSERT(fHashTable.find(HashNode::GetKey(entry))->entry() == entry);
+void GrCCPathCache::evict(GrCCPathCacheEntry* entry) {
+    bool isInCache = entry->fNext || (fLRU.tail() == entry);
+    SkASSERT(isInCache == fLRU.isInList(entry));
+    if (isInCache) {
+        fLRU.remove(entry);
+        fHashTable.remove(HashNode::GetKey(entry));  // Do this last, as it might delete the entry.
+    }
+}
 
-    fHashTable.remove(HashNode::GetKey(entry));  // ~HashNode() handles the rest.
+void GrCCPathCache::purgeAsNeeded() {
+    SkTArray<sk_sp<GrCCPathCacheEntry>> invalidatedEntries;
+    fInvalidatedEntriesInbox.poll(&invalidatedEntries);
+    for (const sk_sp<GrCCPathCacheEntry>& entry : invalidatedEntries) {
+        this->evict(entry.get());
+    }
 }
 
 
 GrCCPathCacheEntry::~GrCCPathCacheEntry() {
-    SkASSERT(!fCacheWeakPtr);  // HashNode should have cleared our cache pointer.
     SkASSERT(!fCurrFlushAtlas);  // Client is required to reset fCurrFlushAtlas back to null.
-
     this->invalidateAtlas();
 }
 
-void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey, uint32_t contextUniqueID,
+void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
                                             const SkIVector& atlasOffset, const SkRect& devBounds,
                                             const SkRect& devBounds45, const SkIRect& devIBounds,
                                             const SkIVector& maskShift) {
-    SkASSERT(contextUniqueID != SK_InvalidUniqueID);
     SkASSERT(atlasKey.isValid());
     SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.
 
-    fContextUniqueID = contextUniqueID;
-
     fAtlasKey = atlasKey;
     fAtlasOffset = atlasOffset + maskShift;
     SkASSERT(!fCachedAtlasInfo);  // Otherwise they should have reused the cached atlas instead.
@@ -221,15 +226,12 @@
     fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
 }
 
-void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey, uint32_t contextUniqueID,
+void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
                                              const SkIVector& newAtlasOffset,
                                              sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
-    SkASSERT(contextUniqueID != SK_InvalidUniqueID);
     SkASSERT(atlasKey.isValid());
     SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.
 
-    fContextUniqueID = contextUniqueID;
-
     fAtlasKey = atlasKey;
     fAtlasOffset = newAtlasOffset;
 
@@ -245,8 +247,10 @@
         if (!fCachedAtlasInfo->fIsPurgedFromResourceCache &&
             fCachedAtlasInfo->fNumInvalidatedPathPixels >= fCachedAtlasInfo->fNumPathPixels / 2) {
             // Too many invalidated pixels: purge the atlas texture from the resource cache.
+            // The GrContext and CCPR path cache both share the same unique ID.
+            uint32_t contextUniqueID = fPathCacheUniqueID;
             SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
-                    GrUniqueKeyInvalidatedMessage(fAtlasKey, fContextUniqueID));
+                    GrUniqueKeyInvalidatedMessage(fAtlasKey, contextUniqueID));
             fCachedAtlasInfo->fIsPurgedFromResourceCache = true;
         }
     }
@@ -256,8 +260,6 @@
 }
 
 void GrCCPathCacheEntry::onChange() {
-    // Our corresponding path was modified or deleted. Evict ourselves.
-    if (fCacheWeakPtr) {
-        fCacheWeakPtr->evict(this);
-    }
+    // Post a thread-safe eviction message.
+    SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Post(sk_ref_sp(this));
 }
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index 7a552bb..e8ce928 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -24,13 +24,8 @@
  */
 class GrCCPathCache {
 public:
-#ifdef SK_DEBUG
-    ~GrCCPathCache() {
-        // Ensure the hash table and LRU list are still coherent.
-        fHashTable.reset();
-        SkASSERT(fLRU.isEmpty());
-    }
-#endif
+    GrCCPathCache(uint32_t contextUniqueID) : fInvalidatedEntriesInbox(contextUniqueID) {}
+    SkDEBUGCODE(~GrCCPathCache();)
 
     // Stores the components of a transformation that affect a path mask (i.e. everything but
     // integer translation). During construction, any integer portions of the matrix's translate are
@@ -55,7 +50,9 @@
     sk_sp<GrCCPathCacheEntry> find(const GrShape&, const MaskTransform&,
                                    CreateIfAbsent = CreateIfAbsent::kNo);
 
-    void evict(const GrCCPathCacheEntry*);
+    void evict(GrCCPathCacheEntry*);
+
+    void purgeAsNeeded();
 
 private:
     // Wrapper around a raw GrShape key that has a specialized operator==. Used by the hash table.
@@ -64,32 +61,38 @@
     };
     friend bool operator==(const HashKey&, const HashKey&);
 
-    // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It can only be
-    // moved, which guarantees the hash table holds exactly one reference for each entry. When a
-    // HashNode goes out of scope, it therefore means the entry has been evicted from the cache.
+    // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
+    // methods for SkTHash, and can only be moved. This guarantees the hash table holds exactly one
+    // reference for each entry.
     class HashNode : SkNoncopyable {
     public:
-        static HashKey GetKey(const HashNode& node) { return GetKey(node.fEntry); }
+        static HashKey GetKey(const HashNode& node) { return GetKey(node.entry()); }
         static HashKey GetKey(const GrCCPathCacheEntry*);
         static uint32_t Hash(HashKey);
 
         HashNode() = default;
-        HashNode(GrCCPathCache*, const MaskTransform&, const GrShape&);
-        HashNode(HashNode&& node) { fEntry = skstd::exchange(node.fEntry, nullptr); }
-        ~HashNode();  // Called when fEntry (if not null) has been evicted from the cache.
+        HashNode(uint32_t pathCacheUniqueID, const MaskTransform&, const GrShape&);
+        HashNode(HashNode&& node) : fEntry(std::move(node.fEntry)) {
+            SkASSERT(!node.fEntry);
+        }
 
-        HashNode& operator=(HashNode&&);
+        HashNode& operator=(HashNode&& node) {
+            fEntry = std::move(node.fEntry);
+            SkASSERT(!node.fEntry);
+            return *this;
+        }
 
-        GrCCPathCacheEntry* entry() const { return fEntry; }
+        GrCCPathCacheEntry* entry() const { return fEntry.get(); }
 
     private:
-        GrCCPathCacheEntry* fEntry = nullptr;
+        sk_sp<GrCCPathCacheEntry> fEntry;
         // The GrShape's unstyled key is stored as a variable-length footer to the 'fEntry'
         // allocation. GetKey provides access to it.
     };
 
     SkTHashTable<HashNode, HashKey> fHashTable;
     SkTInternalLList<GrCCPathCacheEntry> fLRU;
+    SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Inbox fInvalidatedEntriesInbox;
 };
 
 /**
@@ -102,6 +105,8 @@
 
     ~GrCCPathCacheEntry() override;
 
+    uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; }
+
     // The number of times this specific entry (path + matrix combination) has been pulled from
     // the path cache. As long as the caller does exactly one lookup per draw, this translates to
     // the number of times the path has been drawn with a compatible matrix.
@@ -121,15 +126,14 @@
     // Called once our path has been rendered into the mainline CCPR (fp16, coverage count) atlas.
     // The caller will stash this atlas texture away after drawing, and during the next flush,
     // recover it and attempt to copy any paths that got reused into permanent 8-bit atlases.
-    void initAsStashedAtlas(const GrUniqueKey& atlasKey, uint32_t contextUniqueID,
-                            const SkIVector& atlasOffset, const SkRect& devBounds,
-                            const SkRect& devBounds45, const SkIRect& devIBounds,
-                            const SkIVector& maskShift);
+    void initAsStashedAtlas(const GrUniqueKey& atlasKey, const SkIVector& atlasOffset,
+                            const SkRect& devBounds, const SkRect& devBounds45,
+                            const SkIRect& devIBounds, const SkIVector& maskShift);
 
     // Called once our path mask has been copied into a permanent, 8-bit atlas. This method points
     // the entry at the new atlas and updates the CachedAtlasInfo data.
-    void updateToCachedAtlas(const GrUniqueKey& atlasKey, uint32_t contextUniqueID,
-                             const SkIVector& newAtlasOffset, sk_sp<GrCCAtlas::CachedAtlasInfo>);
+    void updateToCachedAtlas(const GrUniqueKey& atlasKey, const SkIVector& newAtlasOffset,
+                             sk_sp<GrCCAtlas::CachedAtlasInfo>);
 
     const GrUniqueKey& atlasKey() const { return fAtlasKey; }
 
@@ -153,18 +157,19 @@
 private:
     using MaskTransform = GrCCPathCache::MaskTransform;
 
-    GrCCPathCacheEntry(GrCCPathCache* cache, const MaskTransform& m)
-            : fCacheWeakPtr(cache), fMaskTransform(m) {}
+    GrCCPathCacheEntry(uint32_t pathCacheUniqueID, const MaskTransform& maskTransform)
+            : fPathCacheUniqueID(pathCacheUniqueID), fMaskTransform(maskTransform) {
+        SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
+    }
 
     // Resets this entry back to not having an atlas, and purges its previous atlas texture from the
     // resource cache if needed.
     void invalidateAtlas();
 
-    // Called when our corresponding path is modified or deleted.
+    // Called when our corresponding path is modified or deleted. Threadsafe.
     void onChange() override;
 
-    uint32_t fContextUniqueID;
-    GrCCPathCache* fCacheWeakPtr;  // Gets manually reset to null by the path cache upon eviction.
+    const uint32_t fPathCacheUniqueID;
     MaskTransform fMaskTransform;
     int fHitCount = 1;
 
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index f77bd75..50eb1eb 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -30,14 +30,16 @@
 }
 
 sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
-        const GrCaps& caps, AllowCaching allowCaching) {
-    return sk_sp<GrCoverageCountingPathRenderer>(
-            IsSupported(caps) ? new GrCoverageCountingPathRenderer(allowCaching) : nullptr);
+        const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
+    return sk_sp<GrCoverageCountingPathRenderer>((IsSupported(caps))
+            ? new GrCoverageCountingPathRenderer(allowCaching, contextUniqueID)
+            : nullptr);
 }
 
-GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching) {
+GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching,
+                                                               uint32_t contextUniqueID) {
     if (AllowCaching::kYes == allowCaching) {
-        fPathCache = skstd::make_unique<GrCCPathCache>();
+        fPathCache = skstd::make_unique<GrCCPathCache>(contextUniqueID);
     }
 }
 
@@ -306,6 +308,10 @@
         fFlushingPaths.reset();
     }
 
+    if (fPathCache) {
+        fPathCache->purgeAsNeeded();
+    }
+
     SkDEBUGCODE(fFlushing = false);
 }
 
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 19e42a9..1457e9b 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -34,7 +34,8 @@
         kYes = true
     };
 
-    static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching);
+    static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching,
+                                                                   uint32_t contextUniqueID);
 
     using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
 
@@ -82,7 +83,7 @@
                                    float* inflationRadius = nullptr);
 
 private:
-    GrCoverageCountingPathRenderer(AllowCaching);
+    GrCoverageCountingPathRenderer(AllowCaching, uint32_t contextUniqueID);
 
     // GrPathRenderer overrides.
     StencilSupport onGetStencilSupport(const GrShape&) const override {
diff --git a/src/gpu/text/GrTextBlobCache.cpp b/src/gpu/text/GrTextBlobCache.cpp
index a60903f..3d8120b 100644
--- a/src/gpu/text/GrTextBlobCache.cpp
+++ b/src/gpu/text/GrTextBlobCache.cpp
@@ -9,6 +9,11 @@
 
 DECLARE_SKMESSAGEBUS_MESSAGE(GrTextBlobCache::PurgeBlobMessage)
 
+static inline bool SkShouldPostMessageToBus(
+        const GrTextBlobCache::PurgeBlobMessage& msg, uint32_t msgBusUniqueID) {
+    return msg.fContextID == msgBusUniqueID;
+}
+
 GrTextBlobCache::~GrTextBlobCache() {
     this->freeAll();
 }
diff --git a/src/gpu/text/GrTextBlobCache.h b/src/gpu/text/GrTextBlobCache.h
index fe245c9..ffccf73 100644
--- a/src/gpu/text/GrTextBlobCache.h
+++ b/src/gpu/text/GrTextBlobCache.h
@@ -116,7 +116,6 @@
     struct PurgeBlobMessage {
         PurgeBlobMessage(uint32_t blobID, uint32_t contextUniqueID)
                 : fBlobID(blobID), fContextID(contextUniqueID) {}
-        bool shouldSend(uint32_t inboxID) const { return fContextID == inboxID; }
 
         uint32_t fBlobID;
         uint32_t fContextID;
diff --git a/tests/MessageBusTest.cpp b/tests/MessageBusTest.cpp
index 145176b..d184149 100644
--- a/tests/MessageBusTest.cpp
+++ b/tests/MessageBusTest.cpp
@@ -9,13 +9,18 @@
 #include "Test.h"
 
 namespace {
+
 struct TestMessage {
-    bool shouldSend(uint32_t inboxID) const { return true; }
     TestMessage(int i, float f) : x(i), y(f) {}
 
     int x;
     float y;
 };
+
+static inline bool SkShouldPostMessageToBus(const TestMessage&, uint32_t) {
+    return true;
+}
+
 }
 DECLARE_SKMESSAGEBUS_MESSAGE(TestMessage)
 
@@ -56,21 +61,24 @@
 }
 
 namespace {
+
 struct AddressedMessage {
     uint32_t fInboxID;
-
-    bool shouldSend(uint32_t inboxID) const {
-        SkASSERT(inboxID);
-        if (!fInboxID) {
-            return true;
-        }
-        return inboxID == fInboxID;
-    }
 };
+
+static inline bool SkShouldPostMessageToBus(const AddressedMessage& msg, uint32_t msgBusUniqueID) {
+    SkASSERT(msgBusUniqueID);
+    if (!msg.fInboxID) {
+        return true;
+    }
+    return msgBusUniqueID == msg.fInboxID;
 }
+
+}
+
 DECLARE_SKMESSAGEBUS_MESSAGE(AddressedMessage)
 
-DEF_TEST(MessageBus_shouldSend, r) {
+DEF_TEST(MessageBus_SkShouldPostMessageToBus, r) {
     SkMessageBus<AddressedMessage>::Inbox inbox1(1), inbox2(2);
 
     SkMessageBus<AddressedMessage>::Post({0});  // Should go to both