ccpr: Age out path cache entries

Adds a hook that is called from GrContext::performDeferredCleanup, so CCPR path
cache entries that have not been used since the given purge time are evicted
along with other stale resources.

Bug: skia:8452
Change-Id: I4e5f4d263528b21247fbc032a1b4881a23cbb2ff
Reviewed-on: https://skia-review.googlesource.com/c/167181
Reviewed-by: Brian Osman <brianosman@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
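For context, a minimal client-side sketch of how the new aging path gets exercised
(not part of this CL; the wrapper function and the five-second threshold are
illustrative):

    // Hypothetical caller: triggering the deferred-cleanup path now also ages
    // out CCPR path cache entries, alongside GPU resources and text blobs.
    #include <chrono>
    #include "GrContext.h"

    void purgeIdleResources(GrContext* context) {
        // Anything not used within the last five seconds becomes eligible for purging.
        context->performDeferredCleanup(std::chrono::milliseconds(5000));
    }
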
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 36ce9da..c3262c9 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -38,6 +38,7 @@
 #include "SkUnPreMultiplyPriv.h"
 #include "effects/GrConfigConversionEffect.h"
 #include "effects/GrSkSLFP.h"
+#include "ccpr/GrCoverageCountingPathRenderer.h"
 #include "text/GrTextBlobCache.h"
 
 #define ASSERT_OWNED_PROXY(P) \
@@ -305,8 +306,15 @@
 
 void GrContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed) {
     ASSERT_SINGLE_OWNER
+
+    auto purgeTime = GrStdSteadyClock::now() - msNotUsed;
+
     fResourceCache->purgeAsNeeded();
-    fResourceCache->purgeResourcesNotUsedSince(GrStdSteadyClock::now() - msNotUsed);
+    fResourceCache->purgeResourcesNotUsedSince(purgeTime);
+
+    if (auto ccpr = fDrawingManager->getCoverageCountingPathRenderer()) {
+        ccpr->purgeCacheEntriesOlderThan(purgeTime);
+    }
 
     fTextBlobCache->purgeStaleBlobs();
 }
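
The purge threshold computed above is a point in time, not a duration. A sketch of
the comparison it implies (assumes GrStdSteadyClock behaves like
std::chrono::steady_clock; the helper function is illustrative):

    // An entry stamped before 'purgeTime' has not been used within the last
    // 'msNotUsed' milliseconds and is therefore eligible for eviction.
    #include <chrono>

    using Clock = std::chrono::steady_clock;

    bool isStale(Clock::time_point entryTimestamp, Clock::time_point now,
                 std::chrono::milliseconds msNotUsed) {
        const Clock::time_point purgeTime = now - msNotUsed;
        return entryTimestamp < purgeTime;
    }
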
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index 4816858..a5b9a10 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -209,17 +209,18 @@
     if (HashNode* node = fHashTable.find(*fScratchKey)) {
         entry = node->entry();
         SkASSERT(fLRU.isInList(entry));
-        if (fuzzy_equals(m, entry->fMaskTransform)) {
-            ++entry->fHitCount;  // The path was reused with a compatible matrix.
-        } else if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
-            // This entry is unique: we can recycle it instead of deleting and malloc-ing a new one.
-            entry->fMaskTransform = m;
-            entry->fHitCount = 1;
-            entry->invalidateAtlas();
-            SkASSERT(!entry->fCurrFlushAtlas);  // Should be null because 'entry' is unique.
-        } else {
-            this->evict(*fScratchKey);
-            entry = nullptr;
+        if (!fuzzy_equals(m, entry->fMaskTransform)) {
+            // The path was reused with an incompatible matrix.
+            if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
+                // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
+                entry->fMaskTransform = m;
+                entry->fHitCount = 0;
+                entry->invalidateAtlas();
+                SkASSERT(!entry->fCurrFlushAtlas);  // Should be null because 'entry' is unique.
+            } else {
+                this->evict(*fScratchKey);
+                entry = nullptr;
+            }
         }
     }
 
@@ -248,10 +249,41 @@
     SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
     SkASSERT(node && node->entry() == entry);
     fLRU.addToHead(entry);
+
+    entry->fTimestamp = this->quickPerFlushTimestamp();
+    ++entry->fHitCount;
     return sk_ref_sp(entry);
 }
 
-void GrCCPathCache::purgeAsNeeded() {
+void GrCCPathCache::doPostFlushProcessing() {
+    this->purgeInvalidatedKeys();
+
+    // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
+    fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
+}
+
+void GrCCPathCache::purgeEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime) {
+    this->purgeInvalidatedKeys();
+
+#ifdef SK_DEBUG
+    auto lastTimestamp = (fLRU.isEmpty())
+            ? GrStdSteadyClock::time_point::max()
+            : fLRU.tail()->fTimestamp;
+#endif
+
+    // Drop every cache entry whose timestamp is older than purgeTime.
+    while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
+#ifdef SK_DEBUG
+        // Verify that fLRU is sorted by timestamp.
+        auto timestamp = fLRU.tail()->fTimestamp;
+        SkASSERT(timestamp >= lastTimestamp);
+        lastTimestamp = timestamp;
+#endif
+        this->evict(*fLRU.tail()->fCacheKey);
+    }
+}
+
+void GrCCPathCache::purgeInvalidatedKeys() {
     SkTArray<sk_sp<Key>> invalidatedKeys;
     fInvalidatedKeysInbox.poll(&invalidatedKeys);
     for (const sk_sp<Key>& key : invalidatedKeys) {
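
Why evicting only from the tail is enough: find() re-stamps an entry with the
per-flush timestamp and moves it to the head of fLRU on every hit, so the list
stays ordered oldest-to-newest from the tail (the debug-only check above asserts
exactly this invariant). A simplified standalone model of the aging loop, with
hypothetical types rather than Skia's:

    #include <chrono>
    #include <list>

    using TimePoint = std::chrono::steady_clock::time_point;

    struct Entry { TimePoint timestamp; };

    // lru.back() plays the role of fLRU.tail(); pop_back() stands in for
    // this->evict(*fLRU.tail()->fCacheKey).
    void purgeOlderThan(std::list<Entry>& lru, TimePoint purgeTime) {
        while (!lru.empty() && lru.back().timestamp < purgeTime) {
            lru.pop_back();
        }
    }
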
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index 16f31d0..3b34fe2 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -86,7 +86,8 @@
     sk_sp<GrCCPathCacheEntry> find(const GrShape&, const MaskTransform&,
                                    CreateIfAbsent = CreateIfAbsent::kNo);
 
-    void purgeAsNeeded();
+    void doPostFlushProcessing();
+    void purgeEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime);
 
 private:
     // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
@@ -118,14 +119,28 @@
         sk_sp<GrCCPathCacheEntry> fEntry;
     };
 
+    GrStdSteadyClock::time_point quickPerFlushTimestamp() {
+        // time_point::min() means it's time to update fPerFlushTimestamp with a newer clock read.
+        if (GrStdSteadyClock::time_point::min() == fPerFlushTimestamp) {
+            fPerFlushTimestamp = GrStdSteadyClock::now();
+        }
+        return fPerFlushTimestamp;
+    }
+
     void evict(const GrCCPathCache::Key& key) {
         fHashTable.remove(key);  // HashNode::willExitHashTable() takes care of the rest.
     }
 
+    void purgeInvalidatedKeys();
+
     SkTHashTable<HashNode, const GrCCPathCache::Key&> fHashTable;
     SkTInternalLList<GrCCPathCacheEntry> fLRU;
     SkMessageBus<sk_sp<Key>>::Inbox fInvalidatedKeysInbox;
     sk_sp<Key> fScratchKey;  // Reused for creating a temporary key in the find() method.
+
+    // We only read the clock once per flush, and cache it in this variable. This prevents
+    // excessive clock reads for cache timestamps, which could degrade performance.
+    GrStdSteadyClock::time_point fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
 };
 
 /**
@@ -201,8 +216,9 @@
 
     sk_sp<GrCCPathCache::Key> fCacheKey;
 
+    GrStdSteadyClock::time_point fTimestamp;
+    int fHitCount = 0;
     MaskTransform fMaskTransform;
-    int fHitCount = 1;
 
     GrUniqueKey fAtlasKey;
     SkIVector fAtlasOffset;
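
The header also introduces the lazy, once-per-flush clock read behind
quickPerFlushTimestamp(): time_point::min() acts as a "needs refresh" sentinel
that doPostFlushProcessing() resets after every flush. A standalone sketch of
that pattern (hypothetical class, not Skia's API):

    #include <chrono>

    class PerFlushClock {
    public:
        using Clock = std::chrono::steady_clock;

        Clock::time_point quickTimestamp() {
            if (Clock::time_point::min() == fTimestamp) {
                fTimestamp = Clock::now();  // First query since the last flush.
            }
            return fTimestamp;  // Later queries in the same flush reuse the value.
        }

        void onPostFlush() {
            // Mark the cached reading stale so the next flush reads the clock again.
            fTimestamp = Clock::time_point::min();
        }

    private:
        Clock::time_point fTimestamp = Clock::time_point::min();
    };
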
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 848a384..daf2cf1 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -309,12 +309,19 @@
     }
 
     if (fPathCache) {
-        fPathCache->purgeAsNeeded();
+        fPathCache->doPostFlushProcessing();
     }
 
     SkDEBUGCODE(fFlushing = false);
 }
 
+void GrCoverageCountingPathRenderer::purgeCacheEntriesOlderThan(
+        const GrStdSteadyClock::time_point& purgeTime) {
+    if (fPathCache) {
+        fPathCache->purgeEntriesOlderThan(purgeTime);
+    }
+}
+
 void GrCoverageCountingPathRenderer::CropPath(const SkPath& path, const SkIRect& cropbox,
                                               SkPath* out) {
     SkPath cropboxPath;
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 19e42a9..554404d 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -65,6 +65,8 @@
                   SkTArray<sk_sp<GrRenderTargetContext>>* out) override;
     void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
 
+    void purgeCacheEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime);
+
     void testingOnly_drawPathDirectly(const DrawPathArgs&);
     const GrUniqueKey& testingOnly_getStashedAtlasKey() const;