Revert "ccpr: Rework the path cache to support sporadic flushing"

This reverts commit d6fa45472cb82b7d8e58d0437f7723c672488b8b.

Reason for revert: Assertion failures

Original change's description:
> ccpr: Rework the path cache to support sporadic flushing
> 
> Removes the notion of a stashed atlas that we store from the previous
> flush. Now we just cache every atlas we ever render. Cached atlases
> can either be 16-bit or 8-bit.
> 
> The "reuse" and "animation" cases should both behave exactly the same
> as before: Where before we would copy from the stashed atlas to 8-bit
> atlases, we now copy from a cached 16-bit atlas and then invalidate
> it. Where before we would recycle the stashed atlas's backing texture
> object, we now recycle this same texture object from an invalidated
> 16-bit cached atlas.
> 
> The main difference is that cases like tiled rendering now work. If
> you draw your whole scene in one flush, you still get one big 16-bit
> cached atlas, just like the "stashed atlas" implementation. But if you
> draw your scene in tiles, you now get lots of little cached 16-bit
> atlases, which can be reused and eventually copied to 8-bit atlases.
> 
> Bug: skia:8462
> Change-Id: Ibae65febb948230aaaf1f1361eef9c8f06ebef18
> Reviewed-on: https://skia-review.googlesource.com/c/179991
> Commit-Queue: Chris Dalton <csmartdalton@google.com>
> Reviewed-by: Robert Phillips <robertphillips@google.com>

TBR=bsalomon@google.com,robertphillips@google.com,csmartdalton@google.com

Change-Id: Iad74a14fcb09da12f32b9b78f803b8472a5d60ae
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: skia:8462
Reviewed-on: https://skia-review.googlesource.com/c/181444
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
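
For context on the scheme being reverted: in the description above, a path mask is rendered once into a 16-bit coverage-count atlas; when the path is reused, the mask is copied into an 8-bit literal-coverage atlas and the 16-bit source is invalidated so its backing texture can be recycled. A minimal standalone sketch of that lifecycle follows; the types and names are illustrative, not Skia's actual API:

    #include <cstdio>
    #include <memory>
    #include <utility>

    enum class CoverageType { kFP16_CoverageCount, kA8_LiteralCoverage };

    struct BackingTexture { int width = 0, height = 0; };

    struct CachedAtlas {
        CoverageType type;
        std::unique_ptr<BackingTexture> backing;
        bool invalidated = false;
    };

    // On reuse: copy the mask into an 8-bit atlas, invalidate the 16-bit
    // source, and hand its backing texture back for recycling.
    std::unique_ptr<BackingTexture> copyToA8AndInvalidate(CachedAtlas& src,
                                                          CachedAtlas& dst) {
        // (actual pixel copy elided)
        dst.type = CoverageType::kA8_LiteralCoverage;
        src.invalidated = true;
        return std::move(src.backing);  // recycled by the next 16-bit atlas
    }

    int main() {
        CachedAtlas fp16{CoverageType::kFP16_CoverageCount,
                         std::make_unique<BackingTexture>(), false};
        CachedAtlas a8{CoverageType::kA8_LiteralCoverage, nullptr, false};
        std::unique_ptr<BackingTexture> recycled = copyToA8AndInvalidate(fp16, a8);
        std::printf("source invalidated: %d, texture recycled: %d\n",
                    fp16.invalidated, recycled != nullptr);
    }
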
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 14a65fa..0d42653 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -317,7 +317,7 @@
     fResourceCache->purgeResourcesNotUsedSince(purgeTime);
 
     if (auto ccpr = fDrawingManager->getCoverageCountingPathRenderer()) {
-        ccpr->purgeCacheEntriesOlderThan(fProxyProvider, purgeTime);
+        ccpr->purgeCacheEntriesOlderThan(purgeTime);
     }
 
     fTextBlobCache->purgeStaleBlobs();
diff --git a/src/gpu/GrPathRendererChain.cpp b/src/gpu/GrPathRendererChain.cpp
index 6e28c5d..a6ad513 100644
--- a/src/gpu/GrPathRendererChain.cpp
+++ b/src/gpu/GrPathRendererChain.cpp
@@ -40,8 +40,7 @@
     if (options.fGpuPathRenderers & GpuPathRenderers::kCoverageCounting) {
         using AllowCaching = GrCoverageCountingPathRenderer::AllowCaching;
         if (auto ccpr = GrCoverageCountingPathRenderer::CreateIfSupported(
-                                caps, AllowCaching(options.fAllowPathMaskCaching),
-                                context->uniqueID())) {
+                                caps, AllowCaching(options.fAllowPathMaskCaching))) {
             fCoverageCountingPathRenderer = ccpr.get();
             context->contextPriv().addOnFlushCallbackObject(fCoverageCountingPathRenderer);
             fChain.push_back(std::move(ccpr));
diff --git a/src/gpu/ccpr/GrCCAtlas.cpp b/src/gpu/ccpr/GrCCAtlas.cpp
index 4d147fd..a41eb38 100644
--- a/src/gpu/ccpr/GrCCAtlas.cpp
+++ b/src/gpu/ccpr/GrCCAtlas.cpp
@@ -16,7 +16,6 @@
 #include "GrTextureProxy.h"
 #include "SkMakeUnique.h"
 #include "SkMathPriv.h"
-#include "ccpr/GrCCPathCache.h"
 #include <atomic>
 
 class GrCCAtlas::Node {
@@ -48,9 +47,8 @@
     GrRectanizerSkyline fRectanizer;
 };
 
-GrCCAtlas::GrCCAtlas(CoverageType coverageType, const Specs& specs, const GrCaps& caps)
-        : fCoverageType(coverageType)
-        , fMaxTextureSize(SkTMax(SkTMax(specs.fMinHeight, specs.fMinWidth),
+GrCCAtlas::GrCCAtlas(GrPixelConfig pixelConfig, const Specs& specs, const GrCaps& caps)
+        : fMaxTextureSize(SkTMax(SkTMax(specs.fMinHeight, specs.fMinWidth),
                                  specs.fMaxPreferredTextureSize)) {
     // Caller should have cropped any paths to the destination render target instead of asking for
     // an atlas larger than maxRenderTargetSize.
@@ -75,12 +73,12 @@
 
     fTopNode = skstd::make_unique<Node>(nullptr, 0, 0, fWidth, fHeight);
 
-    GrColorType colorType = (CoverageType::kFP16_CoverageCount == fCoverageType)
-            ? GrColorType::kAlpha_F16 : GrColorType::kAlpha_8;
+    // TODO: stop relying on the GrPixelConfig here.
+    GrSRGBEncoded srgbEncoded = GrSRGBEncoded::kNo;
+    GrColorType colorType = GrPixelConfigToColorTypeAndEncoding(pixelConfig, &srgbEncoded);
+
     const GrBackendFormat format =
-            caps.getBackendFormatFromGrColorType(colorType, GrSRGBEncoded::kNo);
-    GrPixelConfig pixelConfig = (CoverageType::kFP16_CoverageCount == fCoverageType)
-            ? kAlpha_half_GrPixelConfig : kAlpha_8_GrPixelConfig;
+            caps.getBackendFormatFromGrColorType(colorType, srgbEncoded);
 
     fTextureProxy = GrProxyProvider::MakeFullyLazyProxy(
             [this, pixelConfig](GrResourceProvider* resourceProvider) {
@@ -161,23 +159,27 @@
     return nextID++;
 }
 
-sk_sp<GrCCCachedAtlas> GrCCAtlas::refOrMakeCachedAtlas(GrOnFlushResourceProvider* onFlushRP) {
-    if (!fCachedAtlas) {
-        static const GrUniqueKey::Domain kAtlasDomain = GrUniqueKey::GenerateDomain();
+const GrUniqueKey& GrCCAtlas::getOrAssignUniqueKey(GrOnFlushResourceProvider* onFlushRP) {
+    static const GrUniqueKey::Domain kAtlasDomain = GrUniqueKey::GenerateDomain();
 
-        GrUniqueKey atlasUniqueKey;
-        GrUniqueKey::Builder builder(&atlasUniqueKey, kAtlasDomain, 1, "CCPR Atlas");
+    if (!fUniqueKey.isValid()) {
+        GrUniqueKey::Builder builder(&fUniqueKey, kAtlasDomain, 1, "CCPR Atlas");
         builder[0] = next_atlas_unique_id();
         builder.finish();
 
-        onFlushRP->assignUniqueKeyToProxy(atlasUniqueKey, fTextureProxy.get());
-
-        fCachedAtlas = sk_make_sp<GrCCCachedAtlas>(fCoverageType, atlasUniqueKey, fTextureProxy);
+        if (fTextureProxy->isInstantiated()) {
+            onFlushRP->assignUniqueKeyToProxy(fUniqueKey, fTextureProxy.get());
+        }
     }
+    return fUniqueKey;
+}
 
-    SkASSERT(fCachedAtlas->coverageType() == fCoverageType);
-    SkASSERT(fCachedAtlas->getOnFlushProxy() == fTextureProxy.get());
-    return fCachedAtlas;
+sk_sp<GrCCAtlas::CachedAtlasInfo> GrCCAtlas::refOrMakeCachedAtlasInfo(uint32_t contextUniqueID) {
+    if (!fCachedAtlasInfo) {
+        fCachedAtlasInfo = sk_make_sp<CachedAtlasInfo>(contextUniqueID);
+    }
+    SkASSERT(fCachedAtlasInfo->fContextUniqueID == contextUniqueID);
+    return fCachedAtlasInfo;
 }
 
 sk_sp<GrRenderTargetContext> GrCCAtlas::makeRenderTargetContext(
@@ -203,6 +205,10 @@
         return nullptr;
     }
 
+    if (fUniqueKey.isValid()) {
+        onFlushRP->assignUniqueKeyToProxy(fUniqueKey, fTextureProxy.get());
+    }
+
     SkIRect clearRect = SkIRect::MakeSize(fDrawBounds);
     rtc->clear(&clearRect, SK_PMColor4fTRANSPARENT,
                GrRenderTargetContext::CanClearFullscreen::kYes);
@@ -214,7 +220,7 @@
     if (fAtlases.empty() || !fAtlases.back().addRect(devIBounds, devToAtlasOffset)) {
         // The retired atlas is out of room and can't grow any bigger.
         retiredAtlas = !fAtlases.empty() ? &fAtlases.back() : nullptr;
-        fAtlases.emplace_back(fCoverageType, fSpecs, *fCaps);
+        fAtlases.emplace_back(fPixelConfig, fSpecs, *fCaps);
         SkASSERT(devIBounds.width() <= fSpecs.fMinWidth);
         SkASSERT(devIBounds.height() <= fSpecs.fMinHeight);
         SkAssertResult(fAtlases.back().addRect(devIBounds, devToAtlasOffset));
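
The addRect() logic restored above retires the back atlas when it runs out of room and pushes a fresh one that the specs guarantee can fit the rect. A standalone sketch of that retire-and-grow behavior, where total packed area stands in for the real skyline rectanizer and the names are illustrative:

    #include <cstdio>
    #include <deque>

    // One atlas, tracked by the total area it has packed.
    struct Atlas {
        int used = 0;
        int capacity;
        explicit Atlas(int cap) : capacity(cap) {}
        bool addRect(int area) {
            if (used + area > capacity) return false;
            used += area;
            return true;
        }
    };

    // Mirrors the stack behavior: if the back atlas is out of room, retire it
    // and start a new one. (std::deque keeps the retired reference valid.)
    Atlas* addRect(std::deque<Atlas>& stack, int area, int capacity,
                   Atlas** retired) {
        *retired = nullptr;
        if (stack.empty() || !stack.back().addRect(area)) {
            *retired = stack.empty() ? nullptr : &stack.back();
            stack.emplace_back(capacity);
            stack.back().addRect(area);  // specs guarantee a fit in a fresh atlas
        }
        return &stack.back();
    }

    int main() {
        std::deque<Atlas> stack;
        Atlas* retired = nullptr;
        addRect(stack, 60, 100, &retired);  // first atlas
        addRect(stack, 60, 100, &retired);  // overflows -> retires atlas 0
        std::printf("atlases: %zu, retired one: %d\n", stack.size(),
                    retired != nullptr);
    }
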
diff --git a/src/gpu/ccpr/GrCCAtlas.h b/src/gpu/ccpr/GrCCAtlas.h
index 03eed8c..4a762bc 100644
--- a/src/gpu/ccpr/GrCCAtlas.h
+++ b/src/gpu/ccpr/GrCCAtlas.h
@@ -15,7 +15,6 @@
 #include "SkRefCnt.h"
 #include "SkSize.h"
 
-class GrCCCachedAtlas;
 class GrOnFlushResourceProvider;
 class GrRenderTargetContext;
 class GrTextureProxy;
@@ -46,12 +45,7 @@
         void accountForSpace(int width, int height);
     };
 
-    enum class CoverageType : bool {
-        kFP16_CoverageCount,
-        kA8_LiteralCoverage
-    };
-
-    GrCCAtlas(CoverageType, const Specs&, const GrCaps&);
+    GrCCAtlas(GrPixelConfig, const Specs&, const GrCaps&);
     ~GrCCAtlas();
 
     GrTextureProxy* textureProxy() const { return fTextureProxy.get(); }
@@ -70,7 +64,23 @@
     void setStrokeBatchID(int id);
     int getStrokeBatchID() const { return fStrokeBatchID; }
 
-    sk_sp<GrCCCachedAtlas> refOrMakeCachedAtlas(GrOnFlushResourceProvider*);
+    // Manages a unique resource cache key that gets assigned to the atlas texture. The unique key
+    // is not assigned to the texture proxy until the proxy is instantiated.
+    const GrUniqueKey& getOrAssignUniqueKey(GrOnFlushResourceProvider*);
+    const GrUniqueKey& uniqueKey() const { return fUniqueKey; }
+
+    // An object for simple bookkeeping on the atlas texture once it has a unique key. In practice,
+    // we use it to track the percentage of the original atlas pixels that could still ever
+    // potentially be reused (i.e., those which still represent an extant path). When the percentage
+    // of useful pixels drops below 50%, the entire texture is purged from the resource cache.
+    struct CachedAtlasInfo : public GrNonAtomicRef<CachedAtlasInfo> {
+        CachedAtlasInfo(uint32_t contextUniqueID) : fContextUniqueID(contextUniqueID) {}
+        const uint32_t fContextUniqueID;
+        int fNumPathPixels = 0;
+        int fNumInvalidatedPathPixels = 0;
+        bool fIsPurgedFromResourceCache = false;
+    };
+    sk_sp<CachedAtlasInfo> refOrMakeCachedAtlasInfo(uint32_t contextUniqueID);
 
     // Instantiates our texture proxy for the atlas and returns a pre-cleared GrRenderTargetContext
     // that the caller may use to render the content. After this call, it is no longer valid to call
@@ -87,7 +97,6 @@
 
     bool internalPlaceRect(int w, int h, SkIPoint16* loc);
 
-    const CoverageType fCoverageType;
     const int fMaxTextureSize;
     int fWidth, fHeight;
     std::unique_ptr<Node> fTopNode;
@@ -96,7 +105,11 @@
     int fFillBatchID;
     int fStrokeBatchID;
 
-    sk_sp<GrCCCachedAtlas> fCachedAtlas;
+    // Not every atlas will have a unique key -- a mainline CCPR one won't if we don't stash any
+    // paths, and only the first atlas in the stack is eligible to be stashed.
+    GrUniqueKey fUniqueKey;
+
+    sk_sp<CachedAtlasInfo> fCachedAtlasInfo;
     sk_sp<GrTextureProxy> fTextureProxy;
     sk_sp<GrTexture> fBackingTexture;
 };
@@ -107,10 +120,8 @@
  */
 class GrCCAtlasStack {
 public:
-    using CoverageType = GrCCAtlas::CoverageType;
-
-    GrCCAtlasStack(CoverageType coverageType, const GrCCAtlas::Specs& specs, const GrCaps* caps)
-            : fCoverageType(coverageType), fSpecs(specs), fCaps(caps) {}
+    GrCCAtlasStack(GrPixelConfig pixelConfig, const GrCCAtlas::Specs& specs, const GrCaps* caps)
+            : fPixelConfig(pixelConfig), fSpecs(specs), fCaps(caps) {}
 
     bool empty() const { return fAtlases.empty(); }
     const GrCCAtlas& front() const { SkASSERT(!this->empty()); return fAtlases.front(); }
@@ -136,7 +147,7 @@
     GrCCAtlas* addRect(const SkIRect& devIBounds, SkIVector* devToAtlasOffset);
 
 private:
-    const CoverageType fCoverageType;
+    const GrPixelConfig fPixelConfig;
     const GrCCAtlas::Specs fSpecs;
     const GrCaps* const fCaps;
     GrSTAllocator<4, GrCCAtlas> fAtlases;
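
The CachedAtlasInfo bookkeeping reintroduced in this header is simple pixel accounting: each cached path adds its mask's pixel count, invalidation marks those pixels dead, and once at least half of the tracked pixels are invalid the atlas texture is purged. A standalone sketch of that arithmetic (the real code posts a GrUniqueKeyInvalidatedMessage rather than setting a flag):

    #include <cstdio>

    struct CachedAtlasInfo {
        int fNumPathPixels = 0;
        int fNumInvalidatedPathPixels = 0;
        bool fIsPurgedFromResourceCache = false;
    };

    void invalidatePathPixels(CachedAtlasInfo& info, int numPixels) {
        info.fNumInvalidatedPathPixels += numPixels;
        if (!info.fIsPurgedFromResourceCache &&
            info.fNumInvalidatedPathPixels >= info.fNumPathPixels / 2) {
            // Too many dead pixels: the atlas texture would be purged here.
            info.fIsPurgedFromResourceCache = true;
        }
    }

    int main() {
        CachedAtlasInfo info;
        info.fNumPathPixels = 100 * 100 + 60 * 60;  // two cached masks
        invalidatePathPixels(info, 60 * 60);        // 3600 < 6800: kept
        invalidatePathPixels(info, 100 * 100);      // 13600 >= 6800: purged
        std::printf("purged: %d\n", info.fIsPurgedFromResourceCache);
    }
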
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index ba16f58..c1384fe 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -6,7 +6,6 @@
  */
 
 #include "GrCCDrawPathsOp.h"
-
 #include "GrContext.h"
 #include "GrContextPriv.h"
 #include "GrMemoryPool.h"
@@ -158,6 +157,13 @@
 #endif
 }
 
+GrCCDrawPathsOp::SingleDraw::~SingleDraw() {
+    if (fCacheEntry) {
+        // All currFlushAtlas references must be reset back to null before the flush is finished.
+        fCacheEntry->setCurrFlushAtlas(nullptr);
+    }
+}
+
 GrDrawOp::RequiresDstTexture GrCCDrawPathsOp::finalize(const GrCaps& caps,
                                                        const GrAppliedClip* clip) {
     SkASSERT(1 == fNumDraws);  // There should only be one single path draw in this Op right now.
@@ -227,10 +233,10 @@
 
 void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
                                          GrOnFlushResourceProvider* onFlushRP,
+                                         const GrUniqueKey& stashedAtlasKey,
                                          GrCCPerFlushResourceSpecs* specs) {
     using CreateIfAbsent = GrCCPathCache::CreateIfAbsent;
     using MaskTransform = GrCCPathCache::MaskTransform;
-    using CoverageType = GrCCAtlas::CoverageType;
 
     for (SingleDraw& draw : fDraws) {
         SkPath path;
@@ -241,32 +247,41 @@
         if (pathCache) {
             MaskTransform m(draw.fMatrix, &draw.fCachedMaskShift);
             bool canStashPathMask = draw.fMaskVisibility >= Visibility::kMostlyComplete;
-            draw.fCacheEntry =
-                    pathCache->find(onFlushRP, draw.fShape, m, CreateIfAbsent(canStashPathMask));
+            draw.fCacheEntry = pathCache->find(draw.fShape, m, CreateIfAbsent(canStashPathMask));
         }
 
-        if (draw.fCacheEntry) {
-            if (const GrCCCachedAtlas* cachedAtlas = draw.fCacheEntry->cachedAtlas()) {
-                SkASSERT(cachedAtlas->getOnFlushProxy());
-                if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
+        if (auto cacheEntry = draw.fCacheEntry.get()) {
+            SkASSERT(!cacheEntry->currFlushAtlas());  // Shouldn't be set until setupResources().
+
+            if (cacheEntry->atlasKey().isValid()) {
+                // Does the path already exist in a cached atlas?
+                if (cacheEntry->hasCachedAtlas() &&
+                    (draw.fCachedAtlasProxy = onFlushRP->findOrCreateProxyByUniqueKey(
+                                                     cacheEntry->atlasKey(),
+                                                     GrCCAtlas::kTextureOrigin))) {
                     ++specs->fNumCachedPaths;
-                } else {
-                    // Suggest that this path be copied to a literal coverage atlas, to save memory.
-                    // (The client may decline this copy via DoCopiesToA8Coverage::kNo.)
+                    continue;
+                }
+
+                // Does the path exist in the atlas that we stashed away from last flush? If so we
+                // can copy it into a new 8-bit atlas and keep it in the resource cache.
+                if (stashedAtlasKey.isValid() && stashedAtlasKey == cacheEntry->atlasKey()) {
+                    SkASSERT(!cacheEntry->hasCachedAtlas());
                     int idx = (draw.fShape.style().strokeRec().isFillStyle())
                             ? GrCCPerFlushResourceSpecs::kFillIdx
                             : GrCCPerFlushResourceSpecs::kStrokeIdx;
                     ++specs->fNumCopiedPaths[idx];
                     specs->fCopyPathStats[idx].statPath(path);
-                    specs->fCopyAtlasSpecs.accountForSpace(
-                            draw.fCacheEntry->width(), draw.fCacheEntry->height());
-                    draw.fDoCopyToA8Coverage = true;
+                    specs->fCopyAtlasSpecs.accountForSpace(cacheEntry->width(),
+                                                           cacheEntry->height());
+                    continue;
                 }
-                continue;
+
+                // Whatever atlas the path used to reside in, it no longer exists.
+                cacheEntry->resetAtlasKeyAndInfo();
             }
 
-            if (Visibility::kMostlyComplete == draw.fMaskVisibility &&
-                    draw.fCacheEntry->hitCount() > 1) {
+            if (Visibility::kMostlyComplete == draw.fMaskVisibility && cacheEntry->hitCount() > 1) {
                 int shapeSize = SkTMax(draw.fShapeConservativeIBounds.height(),
                                        draw.fShapeConservativeIBounds.width());
                 if (shapeSize <= onFlushRP->caps()->maxRenderTargetSize()) {
@@ -288,9 +303,8 @@
     }
 }
 
-void GrCCDrawPathsOp::setupResources(
-        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
-        GrCCPerFlushResources* resources, DoCopiesToA8Coverage doCopies) {
+void GrCCDrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,
+                                     GrCCPerFlushResources* resources, DoCopiesToCache doCopies) {
     using DoEvenOddFill = GrCCPathProcessor::DoEvenOddFill;
     SkASSERT(fNumDraws > 0);
     SkASSERT(-1 == fBaseInstance);
@@ -307,29 +321,51 @@
 
         if (auto cacheEntry = draw.fCacheEntry.get()) {
             // Does the path already exist in a cached atlas texture?
-            if (cacheEntry->cachedAtlas()) {
-                SkASSERT(cacheEntry->cachedAtlas()->getOnFlushProxy());
-                if (DoCopiesToA8Coverage::kYes == doCopies && draw.fDoCopyToA8Coverage) {
-                    resources->upgradeEntryToLiteralCoverageAtlas(pathCache, onFlushRP, cacheEntry,
-                                                                  doEvenOddFill);
-                    SkASSERT(cacheEntry->cachedAtlas());
-                    SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage
-                                     == cacheEntry->cachedAtlas()->coverageType());
-                    SkASSERT(cacheEntry->cachedAtlas()->getOnFlushProxy());
-                }
-                this->recordInstance(cacheEntry->cachedAtlas()->getOnFlushProxy(),
-                                     resources->nextPathInstanceIdx());
+            if (auto proxy = draw.fCachedAtlasProxy.get()) {
+                SkASSERT(!cacheEntry->currFlushAtlas());
+                this->recordInstance(proxy, resources->nextPathInstanceIdx());
                 // TODO4F: Preserve float colors
                 resources->appendDrawPathInstance().set(*cacheEntry, draw.fCachedMaskShift,
                                                         draw.fColor.toBytes_RGBA());
                 continue;
             }
+
+            // Have we already encountered this path during the flush? (i.e. was the same SkPath
+            // drawn more than once during the same flush, with a compatible matrix?)
+            if (auto atlas = cacheEntry->currFlushAtlas()) {
+                this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
+                // TODO4F: Preserve float colors
+                resources->appendDrawPathInstance().set(
+                        *cacheEntry, draw.fCachedMaskShift, draw.fColor.toBytes_RGBA(),
+                        cacheEntry->hasCachedAtlas() ? DoEvenOddFill::kNo : doEvenOddFill);
+                continue;
+            }
+
+            // If the cache entry still has a valid atlas key at this point, it means the path
+            // exists in the atlas that we stashed away from last flush. Copy it into a permanent
+            // 8-bit atlas in the resource cache.
+            if (DoCopiesToCache::kYes == doCopies && cacheEntry->atlasKey().isValid()) {
+                SkIVector newOffset;
+                GrCCAtlas* atlas =
+                        resources->copyPathToCachedAtlas(*cacheEntry, doEvenOddFill, &newOffset);
+                cacheEntry->updateToCachedAtlas(
+                        atlas->getOrAssignUniqueKey(onFlushRP), newOffset,
+                        atlas->refOrMakeCachedAtlasInfo(onFlushRP->contextUniqueID()));
+                this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
+                // TODO4F: Preserve float colors
+                resources->appendDrawPathInstance().set(*cacheEntry, draw.fCachedMaskShift,
+                                                        draw.fColor.toBytes_RGBA());
+                // Remember this atlas in case we encounter the path again during the same flush.
+                cacheEntry->setCurrFlushAtlas(atlas);
+                continue;
+            }
         }
 
-        // Render the raw path into a coverage count atlas. renderShapeInAtlas() gives us two tight
+        // Render the raw path into a coverage count atlas. renderPathInAtlas() gives us two tight
         // bounding boxes: One in device space, as well as a second one rotated an additional 45
         // degrees. The path vertex shader uses these two bounding boxes to generate an octagon that
         // circumscribes the path.
+        SkASSERT(!draw.fCachedAtlasProxy);
         SkRect devBounds, devBounds45;
         SkIRect devIBounds;
         SkIVector devToAtlasOffset;
@@ -344,7 +380,7 @@
             // If we have a spot in the path cache, try to make a note of where this mask is so we
             // can reuse it in the future.
             if (auto cacheEntry = draw.fCacheEntry.get()) {
-                SkASSERT(!cacheEntry->cachedAtlas());
+                SkASSERT(!cacheEntry->hasCachedAtlas());
 
                 if (Visibility::kComplete != draw.fMaskVisibility || cacheEntry->hitCount() <= 1) {
                     // Don't cache a path mask unless it's completely visible with a hit count > 1.
@@ -354,9 +390,19 @@
                     continue;
                 }
 
-                cacheEntry->setCoverageCountAtlas(onFlushRP, atlas, devToAtlasOffset, devBounds,
-                                                  devBounds45, devIBounds, draw.fCachedMaskShift);
+                if (resources->nextAtlasToStash() != atlas) {
+                    // This mask does not belong to the atlas that will be stashed for the next flush.
+                    continue;
+                }
+
+                const GrUniqueKey& atlasKey =
+                        resources->nextAtlasToStash()->getOrAssignUniqueKey(onFlushRP);
+                cacheEntry->initAsStashedAtlas(atlasKey, devToAtlasOffset, devBounds, devBounds45,
+                                               devIBounds, draw.fCachedMaskShift);
+                // Remember this atlas in case we encounter the path again during the same flush.
+                cacheEntry->setCurrFlushAtlas(atlas);
             }
+            continue;
         }
     }
 
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
index 76cde50..9eacbb0 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -11,13 +11,14 @@
 #include "GrShape.h"
 #include "SkTInternalLList.h"
 #include "ccpr/GrCCSTLList.h"
-#include "ccpr/GrCCPathCache.h"
 #include "ops/GrDrawOp.h"
 
 struct GrCCPerFlushResourceSpecs;
 struct GrCCPerOpListPaths;
 class GrCCAtlas;
 class GrOnFlushResourceProvider;
+class GrCCPathCache;
+class GrCCPathCacheEntry;
 class GrCCPerFlushResources;
 
 /**
@@ -44,24 +45,26 @@
     void addToOwningPerOpListPaths(sk_sp<GrCCPerOpListPaths> owningPerOpListPaths);
 
     // Makes decisions about how to draw each path (cached, copied, rendered, etc.), and
-    // increments/fills out the corresponding GrCCPerFlushResourceSpecs.
-    void accountForOwnPaths(GrCCPathCache*, GrOnFlushResourceProvider*, GrCCPerFlushResourceSpecs*);
+    // increments/fills out the corresponding GrCCPerFlushResourceSpecs. 'stashedAtlasKey', if
+    // valid, references the mainline coverage count atlas from the previous flush. Paths found in
+    // this atlas will be copied to more permanent atlases in the resource cache.
+    void accountForOwnPaths(GrCCPathCache*, GrOnFlushResourceProvider*,
+                            const GrUniqueKey& stashedAtlasKey, GrCCPerFlushResourceSpecs*);
 
-    // Allows the caller to decide whether to actually do the suggested copies from cached 16-bit
-    // coverage count atlases, and into 8-bit literal coverage atlases. Purely to save space.
-    enum class DoCopiesToA8Coverage : bool {
+    // Allows the caller to decide whether to copy paths out of the stashed atlas and into the
+    // resource cache, or to just re-render the paths from scratch. If there aren't many copies or
+    // the copies would only fill a small atlas, it's probably best to just re-render.
+    enum class DoCopiesToCache : bool {
         kNo = false,
         kYes = true
     };
 
     // Allocates the GPU resources indicated by accountForOwnPaths(), in preparation for drawing. If
-    // DoCopiesToA8Coverage is kNo, the paths slated for copy will instead be left in their 16-bit
-    // coverage count atlases.
+    // DoCopiesToCache is kNo, the paths slated for copy will instead be re-rendered from scratch.
     //
-    // NOTE: If using DoCopiesToA8Coverage::kNo, it is the caller's responsibility to have called
-    // cancelCopies() on the GrCCPerFlushResourceSpecs, prior to making this call.
-    void setupResources(GrCCPathCache*, GrOnFlushResourceProvider*, GrCCPerFlushResources*,
-                        DoCopiesToA8Coverage);
+    // NOTE: If using DoCopiesToCache::kNo, it is the caller's responsibility to call
+    //       convertCopiesToRenders() on the GrCCPerFlushResourceSpecs.
+    void setupResources(GrOnFlushResourceProvider*, GrCCPerFlushResources*, DoCopiesToCache);
 
     void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
 
@@ -91,6 +94,7 @@
         SingleDraw(const SkMatrix&, const GrShape&, float strokeDevWidth,
                    const SkIRect& shapeConservativeIBounds, const SkIRect& maskDevIBounds,
                    Visibility maskVisibility, const SkPMColor4f&);
+        ~SingleDraw();
 
         SkMatrix fMatrix;
         GrShape fShape;
@@ -100,9 +104,9 @@
         Visibility fMaskVisibility;
         SkPMColor4f fColor;
 
-        GrCCPathCache::OnFlushEntryRef fCacheEntry;
+        sk_sp<GrCCPathCacheEntry> fCacheEntry;
+        sk_sp<GrTextureProxy> fCachedAtlasProxy;
         SkIVector fCachedMaskShift;
-        bool fDoCopyToA8Coverage = false;
 
         SingleDraw* fNext = nullptr;
     };
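
DoCopiesToCache, restored above, lets the caller skip the copy pass when re-rendering is cheaper. A hypothetical helper showing the shape of that decision; the threshold parameter is invented for illustration and is not CCPR's actual cutoff:

    enum class DoCopiesToCache : bool { kNo = false, kYes = true };

    // Hypothetical decision helper: if there are no copies, or the copies
    // would only fill a small atlas, re-render from scratch instead.
    DoCopiesToCache chooseDoCopies(int numCopiedPaths, int copyAtlasPixels,
                                   int minPixelsWorthCopying) {
        if (numCopiedPaths == 0 || copyAtlasPixels < minPixelsWorthCopying) {
            return DoCopiesToCache::kNo;  // caller converts copies to re-renders
        }
        return DoCopiesToCache::kYes;
    }
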
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index 7a49e3e..a5b9a10 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -7,8 +7,7 @@
 
 #include "GrCCPathCache.h"
 
-#include "GrOnFlushResourceProvider.h"
-#include "GrProxyProvider.h"
+#include "GrShape.h"
 #include "SkNx.h"
 
 static constexpr int kMaxKeyDataCountU32 = 256;  // 1kB of uint32_t's.
@@ -85,33 +84,66 @@
     return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
 }
 
+inline bool GrCCPathCache::Key::operator==(const GrCCPathCache::Key& that) const {
+    return fDataSizeInBytes == that.fDataSizeInBytes &&
+           !memcmp(this->data(), that.data(), fDataSizeInBytes);
+}
+
 void GrCCPathCache::Key::onChange() {
     // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
     SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
 }
 
-GrCCPathCache::GrCCPathCache(uint32_t contextUniqueID)
-        : fContextUniqueID(contextUniqueID)
-        , fInvalidatedKeysInbox(next_path_cache_id())
+inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
+        const GrCCPathCache::HashNode& node) {
+    return *node.entry()->fCacheKey;
+}
+
+inline uint32_t GrCCPathCache::HashNode::Hash(const Key& key) {
+    return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
+}
+
+inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
+                                         const MaskTransform& m, const GrShape& shape)
+        : fPathCache(pathCache)
+        , fEntry(new GrCCPathCacheEntry(key, m)) {
+    SkASSERT(shape.hasUnstyledKey());
+    shape.addGenIDChangeListener(std::move(key));
+}
+
+inline GrCCPathCache::HashNode::~HashNode() {
+    this->willExitHashTable();
+}
+
+inline GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
+    this->willExitHashTable();
+    fPathCache = node.fPathCache;
+    fEntry = std::move(node.fEntry);
+    SkASSERT(!node.fEntry);
+    return *this;
+}
+
+inline void GrCCPathCache::HashNode::willExitHashTable() {
+    if (!fEntry) {
+        return;  // We were moved.
+    }
+
+    SkASSERT(fPathCache);
+    SkASSERT(fPathCache->fLRU.isInList(fEntry.get()));
+
+    fEntry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
+    fPathCache->fLRU.remove(fEntry.get());
+}
+
+
+GrCCPathCache::GrCCPathCache()
+        : fInvalidatedKeysInbox(next_path_cache_id())
         , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
 }
 
 GrCCPathCache::~GrCCPathCache() {
-    while (!fLRU.isEmpty()) {
-        this->evict(*fLRU.tail()->fCacheKey, fLRU.tail());
-    }
-    SkASSERT(0 == fHashTable.count());  // Ensure the hash table and LRU list were coherent.
-
-    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
-    // We just purge via message bus since we don't have any access to the resource cache right now.
-    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
-        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
-                GrUniqueKeyInvalidatedMessage(proxy->getUniqueKey(), fContextUniqueID));
-    }
-    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
-        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
-                GrUniqueKeyInvalidatedMessage(key, fContextUniqueID));
-    }
+    fHashTable.reset();  // Must be cleared first; ~HashNode calls fLRU.remove() on us.
+    SkASSERT(fLRU.isEmpty());  // Ensure the hash table and LRU list were coherent.
 }
 
 namespace {
@@ -158,16 +190,15 @@
 
 }
 
-GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(GrOnFlushResourceProvider* onFlushRP,
-                                                   const GrShape& shape, const MaskTransform& m,
-                                                   CreateIfAbsent createIfAbsent) {
+sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
+                                              CreateIfAbsent createIfAbsent) {
     if (!shape.hasUnstyledKey()) {
-        return OnFlushEntryRef();
+        return nullptr;
     }
 
     WriteKeyHelper writeKeyHelper(shape);
     if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
-        return OnFlushEntryRef();
+        return nullptr;
     }
 
     SkASSERT(fScratchKey->unique());
@@ -178,15 +209,14 @@
     if (HashNode* node = fHashTable.find(*fScratchKey)) {
         entry = node->entry();
         SkASSERT(fLRU.isInList(entry));
-
         if (!fuzzy_equals(m, entry->fMaskTransform)) {
             // The path was reused with an incompatible matrix.
             if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
                 // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
-                SkASSERT(0 == entry->fOnFlushRefCnt);  // Because we are unique.
                 entry->fMaskTransform = m;
                 entry->fHitCount = 0;
-                entry->releaseCachedAtlas(this);
+                entry->invalidateAtlas();
+                SkASSERT(!entry->fCurrFlushAtlas);  // Should be null because 'entry' is unique.
             } else {
                 this->evict(*fScratchKey);
                 entry = nullptr;
@@ -196,7 +226,7 @@
 
     if (!entry) {
         if (CreateIfAbsent::kNo == createIfAbsent) {
-            return OnFlushEntryRef();
+            return nullptr;
         }
         if (fHashTable.count() >= kMaxCacheCount) {
             SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
@@ -220,54 +250,20 @@
     SkASSERT(node && node->entry() == entry);
     fLRU.addToHead(entry);
 
-    if (0 == entry->fOnFlushRefCnt) {
-        // Only update the time stamp and hit count if we haven't seen this entry yet during the
-        // current flush.
-        entry->fTimestamp = this->quickPerFlushTimestamp();
-        ++entry->fHitCount;
-
-        if (entry->fCachedAtlas) {
-            SkASSERT(SkToBool(entry->fCachedAtlas->peekOnFlushRefCnt())
-                             == SkToBool(entry->fCachedAtlas->getOnFlushProxy()));
-            if (!entry->fCachedAtlas->getOnFlushProxy()) {
-                entry->fCachedAtlas->setOnFlushProxy(
-                    onFlushRP->findOrCreateProxyByUniqueKey(entry->fCachedAtlas->textureKey(),
-                                                            GrCCAtlas::kTextureOrigin));
-            }
-            if (!entry->fCachedAtlas->getOnFlushProxy()) {
-                // Our atlas's backing texture got purged from the GrResourceCache. Release the
-                // cached atlas.
-                entry->releaseCachedAtlas(this);
-            }
-        }
-    }
-    SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
-    return OnFlushEntryRef::OnFlushRef(entry);
+    entry->fTimestamp = this->quickPerFlushTimestamp();
+    ++entry->fHitCount;
+    return sk_ref_sp(entry);
 }
 
-void GrCCPathCache::evict(const GrCCPathCache::Key& key, GrCCPathCacheEntry* entry) {
-    if (!entry) {
-        HashNode* node = fHashTable.find(key);
-        SkASSERT(node);
-        entry = node->entry();
-    }
-    SkASSERT(*entry->fCacheKey == key);
-    entry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
-    entry->releaseCachedAtlas(this);
-    fLRU.remove(entry);
-    fHashTable.remove(key);
-}
-
-void GrCCPathCache::doPreFlushProcessing() {
-    this->evictInvalidatedCacheKeys();
+void GrCCPathCache::doPostFlushProcessing() {
+    this->purgeInvalidatedKeys();
 
     // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
     fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
 }
 
-void GrCCPathCache::purgeEntriesOlderThan(GrProxyProvider* proxyProvider,
-                                          const GrStdSteadyClock::time_point& purgeTime) {
-    this->evictInvalidatedCacheKeys();
+void GrCCPathCache::purgeEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime) {
+    this->purgeInvalidatedKeys();
 
 #ifdef SK_DEBUG
     auto lastTimestamp = (fLRU.isEmpty())
@@ -275,7 +271,7 @@
             : fLRU.tail()->fTimestamp;
 #endif
 
-    // Evict every entry from our local path cache whose timestamp is older than purgeTime.
+    // Drop every cache entry whose timestamp is older than purgeTime.
     while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
 #ifdef SK_DEBUG
         // Verify that fLRU is sorted by timestamp.
@@ -285,37 +281,9 @@
 #endif
         this->evict(*fLRU.tail()->fCacheKey);
     }
-
-    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
-    this->purgeInvalidatedAtlasTextures(proxyProvider);
 }
 
-void GrCCPathCache::purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider* onFlushRP) {
-    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
-        onFlushRP->removeUniqueKeyFromProxy(proxy.get());
-    }
-    fInvalidatedProxies.reset();
-
-    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
-        onFlushRP->processInvalidUniqueKey(key);
-    }
-    fInvalidatedProxyUniqueKeys.reset();
-}
-
-void GrCCPathCache::purgeInvalidatedAtlasTextures(GrProxyProvider* proxyProvider) {
-    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
-        proxyProvider->removeUniqueKeyFromProxy(proxy.get());
-    }
-    fInvalidatedProxies.reset();
-
-    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
-        proxyProvider->processInvalidUniqueKey(key, nullptr,
-                                               GrProxyProvider::InvalidateGPUResource::kYes);
-    }
-    fInvalidatedProxyUniqueKeys.reset();
-}
-
-void GrCCPathCache::evictInvalidatedCacheKeys() {
+void GrCCPathCache::purgeInvalidatedKeys() {
     SkTArray<sk_sp<Key>> invalidatedKeys;
     fInvalidatedKeysInbox.poll(&invalidatedKeys);
     for (const sk_sp<Key>& key : invalidatedKeys) {
@@ -326,41 +294,17 @@
     }
 }
 
-GrCCPathCache::OnFlushEntryRef
-GrCCPathCache::OnFlushEntryRef::OnFlushRef(GrCCPathCacheEntry* entry) {
-    entry->ref();
-    ++entry->fOnFlushRefCnt;
-    if (entry->fCachedAtlas) {
-        entry->fCachedAtlas->incrOnFlushRefCnt();
-    }
-    return OnFlushEntryRef(entry);
-}
 
-GrCCPathCache::OnFlushEntryRef::~OnFlushEntryRef() {
-    if (!fEntry) {
-        return;
-    }
-    --fEntry->fOnFlushRefCnt;
-    SkASSERT(fEntry->fOnFlushRefCnt >= 0);
-    if (fEntry->fCachedAtlas) {
-        fEntry->fCachedAtlas->decrOnFlushRefCnt();
-    }
-    fEntry->unref();
-}
+void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
+                                            const SkIVector& atlasOffset, const SkRect& devBounds,
+                                            const SkRect& devBounds45, const SkIRect& devIBounds,
+                                            const SkIVector& maskShift) {
+    SkASSERT(atlasKey.isValid());
+    SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.
 
-
-void GrCCPathCacheEntry::setCoverageCountAtlas(
-        GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas, const SkIVector& atlasOffset,
-        const SkRect& devBounds, const SkRect& devBounds45, const SkIRect& devIBounds,
-        const SkIVector& maskShift) {
-    SkASSERT(fOnFlushRefCnt > 0);
-    SkASSERT(!fCachedAtlas);  // Otherwise we would need to call releaseCachedAtlas().
-
-    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
-    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
-    fCachedAtlas->addPathPixels(devIBounds.height() * devIBounds.width());
-
+    fAtlasKey = atlasKey;
     fAtlasOffset = atlasOffset + maskShift;
+    SkASSERT(!fCachedAtlasInfo);  // Otherwise the caller should have reused the cached atlas instead.
 
     float dx = (float)maskShift.fX, dy = (float)maskShift.fY;
     fDevBounds = devBounds.makeOffset(-dx, -dy);
@@ -368,65 +312,34 @@
     fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
 }
 
-GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::upgradeToLiteralCoverageAtlas(
-        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas,
-        const SkIVector& newAtlasOffset) {
-    SkASSERT(fOnFlushRefCnt > 0);
-    SkASSERT(fCachedAtlas);
-    SkASSERT(GrCCAtlas::CoverageType::kFP16_CoverageCount == fCachedAtlas->coverageType());
+void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
+                                             const SkIVector& newAtlasOffset,
+                                             sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
+    SkASSERT(atlasKey.isValid());
+    SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.
 
-    ReleaseAtlasResult releaseAtlasResult = this->releaseCachedAtlas(pathCache);
-
-    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
-    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
-    fCachedAtlas->addPathPixels(this->height() * this->width());
-
+    fAtlasKey = atlasKey;
     fAtlasOffset = newAtlasOffset;
-    return releaseAtlasResult;
+
+    SkASSERT(!fCachedAtlasInfo);  // Otherwise we need to invalidate our pixels in the old info.
+    fCachedAtlasInfo = std::move(info);
+    fCachedAtlasInfo->fNumPathPixels += this->height() * this->width();
 }
 
-GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::releaseCachedAtlas(
-        GrCCPathCache* pathCache) {
-    ReleaseAtlasResult result = ReleaseAtlasResult::kNone;
-    if (fCachedAtlas) {
-        result = fCachedAtlas->invalidatePathPixels(pathCache, this->height() * this->width());
-        if (fOnFlushRefCnt) {
-            SkASSERT(fOnFlushRefCnt > 0);
-            fCachedAtlas->decrOnFlushRefCnt(fOnFlushRefCnt);
+void GrCCPathCacheEntry::invalidateAtlas() {
+    if (fCachedAtlasInfo) {
+        // Mark our own pixels invalid in the cached atlas texture.
+        fCachedAtlasInfo->fNumInvalidatedPathPixels += this->height() * this->width();
+        if (!fCachedAtlasInfo->fIsPurgedFromResourceCache &&
+            fCachedAtlasInfo->fNumInvalidatedPathPixels >= fCachedAtlasInfo->fNumPathPixels / 2) {
+            // Too many invalidated pixels: purge the atlas texture from the resource cache.
+            // The GrContext and the CCPR path cache share the same unique ID.
+            SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
+                    GrUniqueKeyInvalidatedMessage(fAtlasKey, fCachedAtlasInfo->fContextUniqueID));
+            fCachedAtlasInfo->fIsPurgedFromResourceCache = true;
         }
-        fCachedAtlas = nullptr;
     }
-    return result;
-}
 
-GrCCPathCacheEntry::ReleaseAtlasResult GrCCCachedAtlas::invalidatePathPixels(
-        GrCCPathCache* pathCache, int numPixels) {
-    // Mark the pixels invalid in the cached atlas texture.
-    fNumInvalidatedPathPixels += numPixels;
-    SkASSERT(fNumInvalidatedPathPixels <= fNumPathPixels);
-    if (!fIsInvalidatedFromResourceCache && fNumInvalidatedPathPixels >= fNumPathPixels / 2) {
-        // Too many invalidated pixels: purge the atlas texture from the resource cache.
-        if (fOnFlushProxy) {
-            // Don't clear (or std::move) fOnFlushProxy. Other path cache entries might still have a
-            // reference on this atlas and expect to use our proxy during the current flush.
-            // fOnFlushProxy will be cleared once fOnFlushRefCnt decrements to zero.
-            pathCache->fInvalidatedProxies.push_back(fOnFlushProxy);
-        } else {
-            pathCache->fInvalidatedProxyUniqueKeys.push_back(fTextureKey);
-        }
-        fIsInvalidatedFromResourceCache = true;
-        return ReleaseAtlasResult::kDidInvalidateFromCache;
-    }
-    return ReleaseAtlasResult::kNone;
-}
-
-void GrCCCachedAtlas::decrOnFlushRefCnt(int count) const {
-    SkASSERT(count > 0);
-    fOnFlushRefCnt -= count;
-    SkASSERT(fOnFlushRefCnt >= 0);
-    if (0 == fOnFlushRefCnt) {
-        // Don't hold the actual proxy past the end of the current flush.
-        SkASSERT(fOnFlushProxy);
-        fOnFlushProxy = nullptr;
-    }
+    fAtlasKey.reset();
+    fCachedAtlasInfo = nullptr;
 }
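
The eviction path restored in this file is driven by a message bus: Key::onChange() posts the invalidated key from the SkPath's genID-change listener, and purgeInvalidatedKeys() polls the inbox at the start of the next flush. A standalone sketch of that pattern, with a plain vector standing in for SkMessageBus and illustrative names:

    #include <cstdio>
    #include <memory>
    #include <unordered_set>
    #include <vector>

    struct Key { int id; };
    // Stand-in for SkMessageBus<sk_sp<Key>>::Inbox: onChange() would Post()
    // the key here from the path's genID-change listener.
    using Inbox = std::vector<std::shared_ptr<Key>>;

    struct PathCache {
        std::unordered_set<int> entries;
        void purgeInvalidatedKeys(Inbox& inbox) {
            for (const auto& key : inbox) {
                entries.erase(key->id);  // evict; the entry may already be gone
            }
            inbox.clear();
        }
    };

    int main() {
        PathCache cache;
        cache.entries = {1, 2, 3};
        Inbox inbox;
        inbox.push_back(std::make_shared<Key>(Key{2}));  // path 2 was modified
        cache.purgeInvalidatedKeys(inbox);
        std::printf("entries left: %zu\n", cache.entries.size());  // 2
    }
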
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index 9aef2e8..3b34fe2 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -8,7 +8,6 @@
 #ifndef GrCCPathCache_DEFINED
 #define GrCCPathCache_DEFINED
 
-#include "GrShape.h"
 #include "SkExchange.h"
 #include "SkTHash.h"
 #include "SkTInternalLList.h"
@@ -25,7 +24,7 @@
  */
 class GrCCPathCache {
 public:
-    GrCCPathCache(uint32_t contextUniqueID);
+    GrCCPathCache();
     ~GrCCPathCache();
 
     class Key : public SkPathRef::GenIDChangeListener {
@@ -44,10 +43,7 @@
         }
         uint32_t* data();
 
-        bool operator==(const Key& that) const {
-            return fDataSizeInBytes == that.fDataSizeInBytes &&
-                   !memcmp(this->data(), that.data(), fDataSizeInBytes);
-        }
+        bool operator==(const Key&) const;
 
         // Called when our corresponding path is modified or deleted. Not threadsafe.
         void onChange() override;
@@ -80,25 +76,6 @@
 #endif
     };
 
-    // Represents a ref on a GrCCPathCacheEntry that should only be used during the current flush.
-    class OnFlushEntryRef : SkNoncopyable {
-    public:
-        static OnFlushEntryRef OnFlushRef(GrCCPathCacheEntry*);
-        OnFlushEntryRef() = default;
-        OnFlushEntryRef(OnFlushEntryRef&& ref) : fEntry(skstd::exchange(ref.fEntry, nullptr)) {}
-        ~OnFlushEntryRef();
-
-        GrCCPathCacheEntry* get() const { return fEntry; }
-        GrCCPathCacheEntry* operator->() const { return fEntry; }
-        GrCCPathCacheEntry& operator*() const { return *fEntry; }
-        explicit operator bool() const { return fEntry; }
-        void operator=(OnFlushEntryRef&& ref) { fEntry = skstd::exchange(ref.fEntry, nullptr); }
-
-    private:
-        OnFlushEntryRef(GrCCPathCacheEntry* entry) : fEntry(entry) {}
-        GrCCPathCacheEntry* fEntry = nullptr;
-    };
-
     enum class CreateIfAbsent : bool {
         kNo = false,
         kYes = true
@@ -106,19 +83,11 @@
 
     // Finds an entry in the cache. Shapes are only given one entry, so any time they are accessed
     // with a different MaskTransform, the old entry gets evicted.
-    OnFlushEntryRef find(GrOnFlushResourceProvider*, const GrShape&, const MaskTransform&,
-                         CreateIfAbsent = CreateIfAbsent::kNo);
+    sk_sp<GrCCPathCacheEntry> find(const GrShape&, const MaskTransform&,
+                                   CreateIfAbsent = CreateIfAbsent::kNo);
 
-    void doPreFlushProcessing();
-
-    void purgeEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point& purgeTime);
-
-    // As we evict entries from our local path cache, we accumulate a list of invalidated atlas
-    // textures. This call purges the invalidated atlas textures from the mainline GrResourceCache.
-    // This call is available with two different "provider" objects, to accommodate whatever might
-    // be available at the callsite.
-    void purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider*);
-    void purgeInvalidatedAtlasTextures(GrProxyProvider*);
+    void doPostFlushProcessing();
+    void purgeEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime);
 
 private:
     // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
@@ -128,9 +97,7 @@
     class HashNode : SkNoncopyable {
     public:
         static const Key& GetKey(const HashNode&);
-        inline static uint32_t Hash(const Key& key) {
-            return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
-        }
+        static uint32_t Hash(const Key&);
 
         HashNode() = default;
         HashNode(GrCCPathCache*, sk_sp<Key>, const MaskTransform&, const GrShape&);
@@ -141,11 +108,13 @@
 
         ~HashNode();
 
-        void operator=(HashNode&& node);
+        HashNode& operator=(HashNode&& node);
 
         GrCCPathCacheEntry* entry() const { return fEntry.get(); }
 
     private:
+        void willExitHashTable();
+
         GrCCPathCache* fPathCache = nullptr;
         sk_sp<GrCCPathCacheEntry> fEntry;
     };
@@ -158,15 +127,13 @@
         return fPerFlushTimestamp;
     }
 
-    void evict(const GrCCPathCache::Key&, GrCCPathCacheEntry* = nullptr);
+    void evict(const GrCCPathCache::Key& key) {
+        fHashTable.remove(key);  // HashNode::willExitHashTable() takes care of the rest.
+    }
 
-    // Evicts all the cache entries whose keys have been queued up in fInvalidatedKeysInbox via
-    // SkPath listeners.
-    void evictInvalidatedCacheKeys();
+    void purgeInvalidatedKeys();
 
-    const uint32_t fContextUniqueID;
-
-    SkTHashTable<HashNode, const Key&> fHashTable;
+    SkTHashTable<HashNode, const GrCCPathCache::Key&> fHashTable;
     SkTInternalLList<GrCCPathCacheEntry> fLRU;
     SkMessageBus<sk_sp<Key>>::Inbox fInvalidatedKeysInbox;
     sk_sp<Key> fScratchKey;  // Reused for creating a temporary key in the find() method.
@@ -174,18 +141,6 @@
     // We only read the clock once per flush and cache it in this variable. This prevents
     // excessive clock reads for cache timestamps, which could degrade performance.
     GrStdSteadyClock::time_point fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
-
-    // As we evict entries from our local path cache, we accumulate lists of invalidated atlas
-    // textures in these two members. We hold these until we purge them from the GrResourceCache
-    // (e.g. via purgeInvalidatedAtlasTextures().)
-    SkSTArray<4, sk_sp<GrTextureProxy>> fInvalidatedProxies;
-    SkSTArray<4, GrUniqueKey> fInvalidatedProxyUniqueKeys;
-
-    friend class GrCCCachedAtlas;  // To append to fInvalidatedProxies, fInvalidatedProxyUniqueKeys.
-
-public:
-    const SkTHashTable<HashNode, const Key&>& testingOnly_getHashTable() const;
-    const SkTInternalLList<GrCCPathCacheEntry>& testingOnly_getLRU() const;
 };
 
 /**
@@ -197,12 +152,10 @@
     SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCPathCacheEntry);
 
     ~GrCCPathCacheEntry() {
-        SkASSERT(0 == fOnFlushRefCnt);
-        SkASSERT(!fCachedAtlas);  // Should have called GrCCPathCache::evict().
+        SkASSERT(!fCurrFlushAtlas);  // Client is required to reset fCurrFlushAtlas back to null.
+        this->invalidateAtlas();
     }
 
-    const GrCCPathCache::Key& cacheKey() const { SkASSERT(fCacheKey); return *fCacheKey; }
-
     // The number of times this specific entry (path + matrix combination) has been pulled from
     // the path cache. As long as the caller does exactly one lookup per draw, this translates to
     // the number of times the path has been drawn with a compatible matrix.
@@ -211,28 +164,44 @@
     // GrCCPathCache::find(.., CreateIfAbsent::kYes), its hit count will be 1.
     int hitCount() const { return fHitCount; }
 
-    const GrCCCachedAtlas* cachedAtlas() const { return fCachedAtlas.get(); }
+    // Does this entry reference a permanent, 8-bit atlas that resides in the resource cache?
+    // (i.e., not a temporarily stashed, fp16 coverage-count atlas.)
+    bool hasCachedAtlas() const { return SkToBool(fCachedAtlasInfo); }
 
     const SkIRect& devIBounds() const { return fDevIBounds; }
     int width() const { return fDevIBounds.width(); }
     int height() const { return fDevIBounds.height(); }
 
-    enum class ReleaseAtlasResult : bool {
-        kNone,
-        kDidInvalidateFromCache
-    };
-
     // Called once our path has been rendered into the mainline CCPR (fp16, coverage count) atlas.
     // The caller will stash this atlas texture away after drawing, and during the next flush,
     // recover it and attempt to copy any paths that got reused into permanent 8-bit atlases.
-    void setCoverageCountAtlas(GrOnFlushResourceProvider*, GrCCAtlas*, const SkIVector& atlasOffset,
-                               const SkRect& devBounds, const SkRect& devBounds45,
-                               const SkIRect& devIBounds, const SkIVector& maskShift);
+    void initAsStashedAtlas(const GrUniqueKey& atlasKey, const SkIVector& atlasOffset,
+                            const SkRect& devBounds, const SkRect& devBounds45,
+                            const SkIRect& devIBounds, const SkIVector& maskShift);
 
     // Called once our path mask has been copied into a permanent, 8-bit atlas. This method points
-    // the entry at the new atlas and updates the GrCCCCachedAtlas data.
-    ReleaseAtlasResult upgradeToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*,
-                                                     GrCCAtlas*, const SkIVector& newAtlasOffset);
+    // the entry at the new atlas and updates the CachedAtlasInfo data.
+    void updateToCachedAtlas(const GrUniqueKey& atlasKey, const SkIVector& newAtlasOffset,
+                             sk_sp<GrCCAtlas::CachedAtlasInfo>);
+
+    const GrUniqueKey& atlasKey() const { return fAtlasKey; }
+
+    void resetAtlasKeyAndInfo() {
+        fAtlasKey.reset();
+        fCachedAtlasInfo.reset();
+    }
+
+    // This is a utility for the caller to detect when a path gets drawn more than once during the
+    // same flush, with compatible matrices. Before adding a path to an atlas, the caller may check
+    // here to see whether the path has already been placed earlier in the same flush. The caller
+    // is required to reset all currFlushAtlas references back to null before any subsequent flush.
+    void setCurrFlushAtlas(const GrCCAtlas* currFlushAtlas) {
+        // This should not get called more than once in a single flush. Once fCurrFlushAtlas is
+        // non-null, it can only be set back to null (once the flush is over).
+        SkASSERT(!fCurrFlushAtlas || !currFlushAtlas);
+        fCurrFlushAtlas = currFlushAtlas;
+    }
+    const GrCCAtlas* currFlushAtlas() const { return fCurrFlushAtlas; }
 
 private:
     using MaskTransform = GrCCPathCache::MaskTransform;
@@ -243,115 +212,32 @@
 
     // Resets this entry back to not having an atlas, and purges its previous atlas texture from the
     // resource cache if needed.
-    ReleaseAtlasResult releaseCachedAtlas(GrCCPathCache*);
+    void invalidateAtlas();
 
     sk_sp<GrCCPathCache::Key> fCacheKey;
+
     GrStdSteadyClock::time_point fTimestamp;
     int fHitCount = 0;
+    MaskTransform fMaskTransform;
 
-    sk_sp<GrCCCachedAtlas> fCachedAtlas;
+    GrUniqueKey fAtlasKey;
     SkIVector fAtlasOffset;
 
-    MaskTransform fMaskTransform;
     SkRect fDevBounds;
     SkRect fDevBounds45;
     SkIRect fDevIBounds;
 
-    int fOnFlushRefCnt = 0;
+    // If null, then we are referencing a "stashed" atlas (see initAsStashedAtlas()).
+    sk_sp<GrCCAtlas::CachedAtlasInfo> fCachedAtlasInfo;
+
+    // This field is for when a path gets drawn more than once during the same flush.
+    const GrCCAtlas* fCurrFlushAtlas = nullptr;
 
     friend class GrCCPathCache;
     friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&,
                                                  GrColor, DoEvenOddFill);  // To access data.
-
-public:
-    int testingOnly_peekOnFlushRefCnt() const;
 };
 
-/**
- * Encapsulates the data for an atlas whose texture is stored in the mainline GrResourceCache. Many
- * instances of GrCCPathCacheEntry will reference the same GrCCCachedAtlas.
- *
- * We use this object to track the percentage of the original atlas pixels that could still ever
- * potentially be reused (i.e., those which still represent an extant path). When the percentage
- * of useful pixels drops below 50%, we purge the entire texture from the resource cache.
- *
- * This object also holds a ref on the atlas's actual texture proxy during flush. When
- * fOnFlushRefCnt decrements back down to zero, we release fOnFlushProxy and reset it back to null.
- */
-class GrCCCachedAtlas : public GrNonAtomicRef<GrCCCachedAtlas> {
-public:
-    using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
-
-    GrCCCachedAtlas(GrCCAtlas::CoverageType type, const GrUniqueKey& textureKey,
-                    sk_sp<GrTextureProxy> onFlushProxy)
-            : fCoverageType(type)
-            , fTextureKey(textureKey)
-            , fOnFlushProxy(std::move(onFlushProxy)) {}
-
-    ~GrCCCachedAtlas() {
-        SkASSERT(!fOnFlushProxy);
-        SkASSERT(!fOnFlushRefCnt);
-    }
-
-    GrCCAtlas::CoverageType coverageType() const  { return fCoverageType; }
-    const GrUniqueKey& textureKey() const { return fTextureKey; }
-
-    GrTextureProxy* getOnFlushProxy() const { return fOnFlushProxy.get(); }
-
-    void setOnFlushProxy(sk_sp<GrTextureProxy> proxy) {
-        SkASSERT(!fOnFlushProxy);
-        fOnFlushProxy = std::move(proxy);
-    }
-
-    void addPathPixels(int numPixels) { fNumPathPixels += numPixels; }
-    ReleaseAtlasResult invalidatePathPixels(GrCCPathCache*, int numPixels);
-
-    int peekOnFlushRefCnt() const { return fOnFlushRefCnt; }
-    void incrOnFlushRefCnt(int count = 1) const {
-        SkASSERT(count > 0);
-        SkASSERT(fOnFlushProxy);
-        fOnFlushRefCnt += count;
-    }
-    void decrOnFlushRefCnt(int count = 1) const;
-
-private:
-    const GrCCAtlas::CoverageType fCoverageType;
-    const GrUniqueKey fTextureKey;
-
-    int fNumPathPixels = 0;
-    int fNumInvalidatedPathPixels = 0;
-    bool fIsInvalidatedFromResourceCache = false;
-
-    mutable sk_sp<GrTextureProxy> fOnFlushProxy;
-    mutable int fOnFlushRefCnt = 0;
-
-public:
-    int testingOnly_peekOnFlushRefCnt() const;
-};
-
-
-inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
-                                         const MaskTransform& m, const GrShape& shape)
-        : fPathCache(pathCache)
-        , fEntry(new GrCCPathCacheEntry(key, m)) {
-    SkASSERT(shape.hasUnstyledKey());
-    shape.addGenIDChangeListener(std::move(key));
-}
-
-inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
-        const GrCCPathCache::HashNode& node) {
-    return *node.entry()->fCacheKey;
-}
-
-inline GrCCPathCache::HashNode::~HashNode() {
-    SkASSERT(!fEntry || !fEntry->fCachedAtlas);  // Should have called GrCCPathCache::evict().
-}
-
-inline void GrCCPathCache::HashNode::operator=(HashNode&& node) {
-    SkASSERT(!fEntry || !fEntry->fCachedAtlas);  // Should have called GrCCPathCache::evict().
-    fEntry = skstd::exchange(node.fEntry, nullptr);
-}
-
 inline void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry& entry,
                                              const SkIVector& shift, GrColor color,
                                              DoEvenOddFill doEvenOddFill) {
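
The currFlushAtlas field restored above dedupes repeat draws of the same path within one flush. A standalone sketch of the intended usage, mirroring setCurrFlushAtlas()/currFlushAtlas() with illustrative types:

    #include <cassert>
    #include <cstdio>

    struct Atlas { int id; };

    // The field may only go null -> atlas during a flush, and back to null
    // once the flush is over.
    struct CacheEntry {
        const Atlas* currFlushAtlas = nullptr;
        void setCurrFlushAtlas(const Atlas* atlas) {
            assert(!currFlushAtlas || !atlas);  // never swap one atlas for another
            currFlushAtlas = atlas;
        }
    };

    int main() {
        Atlas atlas{7};
        CacheEntry entry;
        // First draw of the path this flush: render it and remember the atlas.
        if (!entry.currFlushAtlas) entry.setCurrFlushAtlas(&atlas);
        // Second draw of the same path this flush: reuse the remembered atlas.
        if (const Atlas* reuse = entry.currFlushAtlas) {
            std::printf("reusing atlas %d\n", reuse->id);
        }
        entry.setCurrFlushAtlas(nullptr);  // required reset before the flush ends
    }
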
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.cpp b/src/gpu/ccpr/GrCCPerFlushResources.cpp
index e6cf8bb..41cd2e2 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.cpp
+++ b/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -33,9 +33,8 @@
         return RequiresDstTexture::kNo;
     }
     CombineResult onCombineIfPossible(GrOp* other, const GrCaps&) override {
-        // We will only make multiple copy ops if they have different source proxies.
-        // TODO: make use of texture chaining.
-        return CombineResult::kCannotCombine;
+        SK_ABORT("Only expected one Op per CCPR atlas.");
+        return CombineResult::kMerged;
     }
     void onPrepare(GrOpFlushState*) override {}
 
@@ -51,7 +50,7 @@
     const sk_sp<const GrCCPerFlushResources> fResources;
 };
 
-// Copies paths from a cached coverage count atlas into an 8-bit literal-coverage atlas.
+// Copies paths from a stashed coverage count atlas into an 8-bit literal-coverage atlas.
 class CopyAtlasOp : public AtlasOp {
 public:
     DEFINE_OP_CLASS_ID
@@ -67,16 +66,18 @@
     }
 
     const char* name() const override { return "CopyAtlasOp (CCPR)"; }
-    void visitProxies(const VisitProxyFunc& fn, VisitorType) const override { fn(fSrcProxy.get()); }
+    void visitProxies(const VisitProxyFunc& fn, VisitorType) const override {
+        fn(fStashedAtlasProxy.get());
+    }
 
     void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
-        SkASSERT(fSrcProxy);
+        SkASSERT(fStashedAtlasProxy);
         GrPipeline::FixedDynamicState dynamicState;
-        auto srcProxy = fSrcProxy.get();
-        dynamicState.fPrimitiveProcessorTextures = &srcProxy;
+        auto atlasProxy = fStashedAtlasProxy.get();
+        dynamicState.fPrimitiveProcessorTextures = &atlasProxy;
 
         GrPipeline pipeline(flushState->proxy(), GrScissorTest::kDisabled, SkBlendMode::kSrc);
-        GrCCPathProcessor pathProc(srcProxy);
+        GrCCPathProcessor pathProc(atlasProxy);
         pathProc.drawPaths(flushState, pipeline, &dynamicState, *fResources, fBaseInstance,
                            fEndInstance, this->bounds());
     }
@@ -84,14 +85,15 @@
 private:
     friend class ::GrOpMemoryPool; // for ctor
 
-    CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> srcProxy,
+    CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> copyProxy,
                 int baseInstance, int endInstance, const SkISize& drawBounds)
             : AtlasOp(ClassID(), std::move(resources), drawBounds)
-            , fSrcProxy(srcProxy)
+            , fStashedAtlasProxy(copyProxy)
             , fBaseInstance(baseInstance)
             , fEndInstance(endInstance) {
     }
-    sk_sp<GrTextureProxy> fSrcProxy;
+
+    sk_sp<GrTextureProxy> fStashedAtlasProxy;
     const int fBaseInstance;
     const int fEndInstance;
 };
@@ -159,10 +161,9 @@
         , fStroker(specs.fNumRenderedPaths[kStrokeIdx],
                    specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkPoints,
                    specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkVerbs)
-        , fCopyAtlasStack(GrCCAtlas::CoverageType::kA8_LiteralCoverage, specs.fCopyAtlasSpecs,
-                          onFlushRP->caps())
-        , fRenderedAtlasStack(GrCCAtlas::CoverageType::kFP16_CoverageCount,
-                              specs.fRenderedAtlasSpecs, onFlushRP->caps())
+        , fCopyAtlasStack(kAlpha_8_GrPixelConfig, specs.fCopyAtlasSpecs, onFlushRP->caps())
+        , fRenderedAtlasStack(kAlpha_half_GrPixelConfig, specs.fRenderedAtlasSpecs,
+                              onFlushRP->caps())
         , fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
         , fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
         , fInstanceBuffer(onFlushRP->makeBuffer(kVertex_GrBufferType,
@@ -189,84 +190,21 @@
     SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
 }
 
-void GrCCPerFlushResources::upgradeEntryToLiteralCoverageAtlas(
-        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCPathCacheEntry* entry,
-        GrCCPathProcessor::DoEvenOddFill evenOdd) {
-    using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
+GrCCAtlas* GrCCPerFlushResources::copyPathToCachedAtlas(const GrCCPathCacheEntry& entry,
+                                                        GrCCPathProcessor::DoEvenOddFill evenOdd,
+                                                        SkIVector* newAtlasOffset) {
     SkASSERT(this->isMapped());
     SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);
+    SkASSERT(!entry.hasCachedAtlas());  // Unexpected, but not necessarily a problem.
 
-    const GrCCCachedAtlas* cachedAtlas = entry->cachedAtlas();
-    SkASSERT(cachedAtlas);
-    SkASSERT(cachedAtlas->getOnFlushProxy());
-
-    if (GrCCAtlas::CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
-        // This entry has already been upgraded to literal coverage. The path must have been drawn
-        // multiple times during the flush.
-        SkDEBUGCODE(--fEndCopyInstance);
-        return;
+    if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry.devIBounds(), newAtlasOffset)) {
+        // We did not fit in the previous copy atlas and it was retired. We will render the copies
+        // up until fNextCopyInstanceIdx into the retired atlas during finalize().
+        retiredAtlas->setFillBatchID(fNextCopyInstanceIdx);
     }
 
-    SkIVector newAtlasOffset;
-    if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry->devIBounds(), &newAtlasOffset)) {
-        // We did not fit in the previous copy atlas and it was retired. We will render the ranges
-        // up until fCopyPathRanges.count() into the retired atlas during finalize().
-        retiredAtlas->setFillBatchID(fCopyPathRanges.count());
-        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
-    }
-
-    this->recordCopyPathInstance(*entry, newAtlasOffset, evenOdd,
-                                 sk_ref_sp(cachedAtlas->getOnFlushProxy()));
-
-    sk_sp<GrTexture> previousAtlasTexture =
-            sk_ref_sp(cachedAtlas->getOnFlushProxy()->peekTexture());
-    GrCCAtlas* newAtlas = &fCopyAtlasStack.current();
-    if (ReleaseAtlasResult::kDidInvalidateFromCache ==
-            entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, newAtlas, newAtlasOffset)) {
-        // This texture just got booted out of the cache. Keep it around, in case we might be able
-        // to recycle it for a new atlas. We can recycle it because copying happens before rendering
-        // new paths, and every path from the atlas that we're planning to use this flush will be
-        // copied to a new atlas. We'll never copy some and leave others.
-        fRecyclableAtlasTextures.push_back(std::move(previousAtlasTexture));
-    }
-}
-
-template<typename T, typename... Args>
-static void emplace_at_memcpy(SkTArray<T>* array, int idx, Args&&... args) {
-    if (int moveCount = array->count() - idx) {
-        array->push_back();
-        T* location = array->begin() + idx;
-        memcpy(location+1, location, moveCount * sizeof(T));
-        new (location) T(std::forward<Args>(args)...);
-    } else {
-        array->emplace_back(std::forward<Args>(args)...);
-    }
-}
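
A toy usage of the emplace_at_memcpy() helper removed above: it relocates the array tail with a raw memcpy and placement-news the new element, so it is only safe for trivially relocatable T. Plain ints are used here for clarity.

    SkTArray<int> a;
    a.push_back(10);
    a.push_back(30);
    emplace_at_memcpy(&a, 1, 20);  // tail {30} is memcpy'd up one slot, then 20
                                   // is constructed in place at index 1
    // a now holds {10, 20, 30}
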
-
-void GrCCPerFlushResources::recordCopyPathInstance(const GrCCPathCacheEntry& entry,
-                                                   const SkIVector& newAtlasOffset,
-                                                   GrCCPathProcessor::DoEvenOddFill evenOdd,
-                                                   sk_sp<GrTextureProxy> srcProxy) {
-    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);
-
-    // Write the instance at the back of the array.
-    int currentInstanceIdx = fNextCopyInstanceIdx++;
-    fPathInstanceData[currentInstanceIdx].set(entry, newAtlasOffset, GrColor_WHITE, evenOdd);
-
-    // Percolate the instance forward until it's contiguous with other instances that share the same
-    // proxy.
-    for (int i = fCopyPathRanges.count() - 1; i >= fCurrCopyAtlasRangesIdx; --i) {
-        if (fCopyPathRanges[i].fSrcProxy == srcProxy) {
-            ++fCopyPathRanges[i].fCount;
-            return;
-        }
-        int rangeFirstInstanceIdx = currentInstanceIdx - fCopyPathRanges[i].fCount;
-        std::swap(fPathInstanceData[rangeFirstInstanceIdx], fPathInstanceData[currentInstanceIdx]);
-        currentInstanceIdx = rangeFirstInstanceIdx;
-    }
-
-    // An instance with this particular proxy did not yet exist in the array. Add a range for it.
-    emplace_at_memcpy(&fCopyPathRanges, fCurrCopyAtlasRangesIdx, std::move(srcProxy), 1);
+    fPathInstanceData[fNextCopyInstanceIdx++].set(entry, *newAtlasOffset, GrColor_WHITE, evenOdd);
+    return &fCopyAtlasStack.current();
 }
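
To make the percolation in the removed recordCopyPathInstance() concrete, a worked trace with two hypothetical source proxies A and B, starting from ranges [A x2][B x1] when a new copy instance for A is recorded:

    // instance buffer before:  [A0 A1 B0]        ranges: [A x2][B x1]
    // write at the back:       [A0 A1 B0 A2]
    // range B (proxy != A):    swap A2 <-> B0 -> [A0 A1 A2 B0]
    // range A (proxy == A):    ++fCount; done.   ranges: [A x3][B x1]
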
 
 static bool transform_path_pts(const SkMatrix& m, const SkPath& path,
@@ -325,7 +263,7 @@
     return true;
 }
 
-GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
+const GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
         const SkIRect& clipIBounds, const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
         SkRect* devBounds, SkRect* devBounds45, SkIRect* devIBounds, SkIVector* devToAtlasOffset) {
     SkASSERT(this->isMapped());
@@ -423,17 +361,17 @@
 }
 
 bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
+                                     sk_sp<GrTextureProxy> stashedAtlasProxy,
                                      SkTArray<sk_sp<GrRenderTargetContext>>* out) {
     SkASSERT(this->isMapped());
     SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
-    SkASSERT(fNextCopyInstanceIdx == fEndCopyInstance);
+    // No assert for fEndCopyInstance because the caller may have detected and skipped duplicates.
 
     fInstanceBuffer->unmap();
     fPathInstanceData = nullptr;
 
     if (!fCopyAtlasStack.empty()) {
-        fCopyAtlasStack.current().setFillBatchID(fCopyPathRanges.count());
-        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
+        fCopyAtlasStack.current().setFillBatchID(fNextCopyInstanceIdx);
     }
     if (!fRenderedAtlasStack.empty()) {
         fRenderedAtlasStack.current().setFillBatchID(fFiller.closeCurrentBatch());
@@ -449,44 +387,38 @@
         return false;
     }
 
-    // Draw the copies from 16-bit literal coverage atlas(es) into 8-bit cached atlas(es).
-    int copyRangeIdx = 0;
+    // Draw the copies from the stashed atlas into 8-bit cached atlas(es).
     int baseCopyInstance = 0;
     for (GrCCAtlasStack::Iter atlas(fCopyAtlasStack); atlas.next();) {
-        int endCopyRange = atlas->getFillBatchID();
-        SkASSERT(endCopyRange > copyRangeIdx);
-
-        sk_sp<GrRenderTargetContext> rtc = atlas->makeRenderTargetContext(onFlushRP);
-        for (; copyRangeIdx < endCopyRange; ++copyRangeIdx) {
-            const CopyPathRange& copyRange = fCopyPathRanges[copyRangeIdx];
-            int endCopyInstance = baseCopyInstance + copyRange.fCount;
-            if (rtc) {
-                auto op = CopyAtlasOp::Make(rtc->surfPriv().getContext(), sk_ref_sp(this),
-                                            copyRange.fSrcProxy, baseCopyInstance, endCopyInstance,
-                                            atlas->drawBounds());
-                rtc->addDrawOp(GrNoClip(), std::move(op));
-            }
-            baseCopyInstance = endCopyInstance;
+        int endCopyInstance = atlas->getFillBatchID();
+        if (endCopyInstance <= baseCopyInstance) {
+            SkASSERT(endCopyInstance == baseCopyInstance);
+            continue;
         }
-        out->push_back(std::move(rtc));
+        if (auto rtc = atlas->makeRenderTargetContext(onFlushRP)) {
+            GrContext* ctx = rtc->surfPriv().getContext();
+            auto op = CopyAtlasOp::Make(ctx, sk_ref_sp(this), stashedAtlasProxy, baseCopyInstance,
+                                        endCopyInstance, atlas->drawBounds());
+            rtc->addDrawOp(GrNoClip(), std::move(op));
+            out->push_back(std::move(rtc));
+        }
+        baseCopyInstance = endCopyInstance;
     }
-    SkASSERT(fCopyPathRanges.count() == copyRangeIdx);
-    SkASSERT(fNextCopyInstanceIdx == baseCopyInstance);
-    SkASSERT(baseCopyInstance == fEndCopyInstance);
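
In the restored scheme, each copy atlas records the end of its contiguous slice of the copy-instance buffer via setFillBatchID(fNextCopyInstanceIdx), so the loop above walks the buffer in a single pass. A hypothetical two-atlas layout:

    // copy instances:          [0 1 2 3 4 5 6]
    // atlas0 fill batch ID 3:  draws instances [0, 3)
    // atlas1 fill batch ID 7:  draws instances [3, 7)
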
 
     // Render the coverage count atlas(es).
     for (GrCCAtlasStack::Iter atlas(fRenderedAtlasStack); atlas.next();) {
-        // Copies will be finished by the time we get to rendering new atlases. See if we can
-        // recycle any previous invalidated atlas textures instead of creating new ones.
+        // Copies will be finished by the time we get to this atlas. See if we can recycle the
+        // stashed atlas texture instead of creating a new one.
         sk_sp<GrTexture> backingTexture;
-        for (sk_sp<GrTexture>& texture : fRecyclableAtlasTextures) {
-            if (texture && atlas->currentHeight() == texture->height() &&
-                    atlas->currentWidth() == texture->width()) {
-                backingTexture = skstd::exchange(texture, nullptr);
-                break;
-            }
+        if (stashedAtlasProxy && atlas->currentWidth() == stashedAtlasProxy->width() &&
+            atlas->currentHeight() == stashedAtlasProxy->height()) {
+            backingTexture = sk_ref_sp(stashedAtlasProxy->peekTexture());
         }
 
+        // Delete the stashed proxy here. That way, if we can't recycle the stashed atlas texture,
+        // we free this memory prior to allocating a new backing texture.
+        stashedAtlasProxy = nullptr;
+
         if (auto rtc = atlas->makeRenderTargetContext(onFlushRP, std::move(backingTexture))) {
             auto op = RenderAtlasOp::Make(rtc->surfPriv().getContext(), sk_ref_sp(this),
                                           atlas->getFillBatchID(), atlas->getStrokeBatchID(),
@@ -499,10 +431,23 @@
     return true;
 }
 
-void GrCCPerFlushResourceSpecs::cancelCopies() {
-    // Convert copies to cached draws.
-    fNumCachedPaths += fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx];
-    fNumCopiedPaths[kFillIdx] = fNumCopiedPaths[kStrokeIdx] = 0;
-    fCopyPathStats[kFillIdx] = fCopyPathStats[kStrokeIdx] = GrCCRenderedPathStats();
+void GrCCPerFlushResourceSpecs::convertCopiesToRenders() {
+    for (int i = 0; i < 2; ++i) {
+        fNumRenderedPaths[i] += fNumCopiedPaths[i];
+        fNumCopiedPaths[i] = 0;
+
+        fRenderedPathStats[i].fMaxPointsPerPath =
+               SkTMax(fRenderedPathStats[i].fMaxPointsPerPath, fCopyPathStats[i].fMaxPointsPerPath);
+        fRenderedPathStats[i].fNumTotalSkPoints += fCopyPathStats[i].fNumTotalSkPoints;
+        fRenderedPathStats[i].fNumTotalSkVerbs += fCopyPathStats[i].fNumTotalSkVerbs;
+        fRenderedPathStats[i].fNumTotalConicWeights += fCopyPathStats[i].fNumTotalConicWeights;
+        fCopyPathStats[i] = GrCCRenderedPathStats();
+    }
+
+    fRenderedAtlasSpecs.fApproxNumPixels += fCopyAtlasSpecs.fApproxNumPixels;
+    fRenderedAtlasSpecs.fMinWidth =
+            SkTMax(fRenderedAtlasSpecs.fMinWidth, fCopyAtlasSpecs.fMinWidth);
+    fRenderedAtlasSpecs.fMinHeight =
+            SkTMax(fRenderedAtlasSpecs.fMinHeight, fCopyAtlasSpecs.fMinHeight);
     fCopyAtlasSpecs = GrCCAtlas::Specs();
 }
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.h b/src/gpu/ccpr/GrCCPerFlushResources.h
index f363c16..132068f 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.h
+++ b/src/gpu/ccpr/GrCCPerFlushResources.h
@@ -14,7 +14,6 @@
 #include "ccpr/GrCCStroker.h"
 #include "ccpr/GrCCPathProcessor.h"
 
-class GrCCPathCache;
 class GrCCPathCacheEntry;
 class GrOnFlushResourceProvider;
 class GrShape;
@@ -54,8 +53,7 @@
         return 0 == fNumCachedPaths + fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx] +
                     fNumRenderedPaths[kFillIdx] + fNumRenderedPaths[kStrokeIdx] + fNumClipPaths;
     }
-    // Converts the copies to normal cached draws.
-    void cancelCopies();
+    void convertCopiesToRenders();
 };
 
 /**
@@ -69,19 +67,22 @@
 
     bool isMapped() const { return SkToBool(fPathInstanceData); }
 
-    // Copies a coverage-counted path out of the given texture proxy, and into a cached, 8-bit,
-    // literal coverage atlas. Updates the cache entry to reference the new atlas.
-    void upgradeEntryToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*,
-                                            GrCCPathCacheEntry*, GrCCPathProcessor::DoEvenOddFill);
+    // Copies a path out of the previous flush's stashed mainline coverage count atlas, and into
+    // a cached, 8-bit, literal-coverage atlas. The actual source texture to copy from will be
+    // provided at the time finalize() is called.
+    GrCCAtlas* copyPathToCachedAtlas(const GrCCPathCacheEntry&, GrCCPathProcessor::DoEvenOddFill,
+                                     SkIVector* newAtlasOffset);
 
     // These two methods render a path into a temporary coverage count atlas. See
-    // GrCCPathProcessor::Instance for a description of the outputs.
+    // GrCCPathProcessor::Instance for a description of the outputs. The returned atlases are
+    // "const" to prevent the caller from assigning a unique key.
     //
     // strokeDevWidth must be 0 for fills, 1 for hairlines, or the stroke width in device-space
     // pixels for non-hairline strokes (implicitly requiring a rigid-body transform).
-    GrCCAtlas* renderShapeInAtlas(const SkIRect& clipIBounds, const SkMatrix&, const GrShape&,
-                                  float strokeDevWidth, SkRect* devBounds, SkRect* devBounds45,
-                                  SkIRect* devIBounds, SkIVector* devToAtlasOffset);
+    const GrCCAtlas* renderShapeInAtlas(const SkIRect& clipIBounds, const SkMatrix&, const GrShape&,
+                                        float strokeDevWidth, SkRect* devBounds,
+                                        SkRect* devBounds45, SkIRect* devIBounds,
+                                        SkIVector* devToAtlasOffset);
     const GrCCAtlas* renderDeviceSpacePathInAtlas(const SkIRect& clipIBounds, const SkPath& devPath,
                                                   const SkIRect& devPathIBounds,
                                                   SkIVector* devToAtlasOffset);
@@ -99,8 +100,11 @@
         return fPathInstanceData[fNextPathInstanceIdx++];
     }
 
-    // Finishes off the GPU buffers and renders the atlas(es).
-    bool finalize(GrOnFlushResourceProvider*, SkTArray<sk_sp<GrRenderTargetContext>>* out);
+    // Finishes off the GPU buffers and renders the atlas(es). 'stashedAtlasProxy', if provided, is
+    // the mainline coverage count atlas from the previous flush. It will be used as the source
+    // texture for any copies set up by copyPathToCachedAtlas().
+    bool finalize(GrOnFlushResourceProvider*, sk_sp<GrTextureProxy> stashedAtlasProxy,
+                  SkTArray<sk_sp<GrRenderTargetContext>>* out);
 
     // Accessors used by draw calls, once the resources have been finalized.
     const GrCCFiller& filler() const { SkASSERT(!this->isMapped()); return fFiller; }
@@ -109,9 +113,23 @@
     const GrBuffer* vertexBuffer() const { SkASSERT(!this->isMapped()); return fVertexBuffer.get();}
     GrBuffer* instanceBuffer() const { SkASSERT(!this->isMapped()); return fInstanceBuffer.get(); }
 
+    // Returns the mainline coverage count atlas that the client may stash for next flush, if any.
+    // The caller is responsible for calling getOrAssignUniqueKey() on this atlas if they wish to
+    // actually stash it in order to copy paths into cached atlases.
+    GrCCAtlas* nextAtlasToStash() {
+        return fRenderedAtlasStack.empty() ? nullptr : &fRenderedAtlasStack.front();
+    }
+
+    // Returns true if the client has called getOrAssignUniqueKey() on our nextAtlasToStash().
+    bool hasStashedAtlas() const {
+        return !fRenderedAtlasStack.empty() && fRenderedAtlasStack.front().uniqueKey().isValid();
+    }
+    const GrUniqueKey& stashedAtlasKey() const  {
+        SkASSERT(this->hasStashedAtlas());
+        return fRenderedAtlasStack.front().uniqueKey();
+    }
+
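
Taken together with postFlush() in GrCoverageCountingPathRenderer.cpp below, the intended stashing sequence is roughly the sketch that follows. The call site and the getOrAssignUniqueKey() argument are assumptions drawn from the comments above, not verified signatures.

    // Before finalize(): opt the front rendered atlas in to stashing.
    if (GrCCAtlas* atlas = resources->nextAtlasToStash()) {
        atlas->getOrAssignUniqueKey(onFlushRP);  // assumed signature
    }
    // ... resources->finalize(onFlushRP, std::move(stashedAtlasProxy), out) ...

    // In postFlush(): remember the key so the next preFlush() can dig it up.
    if (resources->hasStashedAtlas()) {
        fStashedAtlasKey = resources->stashedAtlasKey();
    }
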
 private:
-    void recordCopyPathInstance(const GrCCPathCacheEntry&, const SkIVector& newAtlasOffset,
-                                GrCCPathProcessor::DoEvenOddFill, sk_sp<GrTextureProxy> srcProxy);
     bool placeRenderedPathInAtlas(const SkIRect& clipIBounds, const SkIRect& pathIBounds,
                                   GrScissorTest*, SkIRect* clippedPathIBounds,
                                   SkIVector* devToAtlasOffset);
@@ -131,30 +149,6 @@
     SkDEBUGCODE(int fEndCopyInstance);
     int fNextPathInstanceIdx;
     SkDEBUGCODE(int fEndPathInstance);
-
-    // Represents a range of copy-path instances that all share the same source proxy. (i.e. Draw
-    // instances that copy a path mask from a 16-bit coverage count atlas into an 8-bit literal
-    // coverage atlas.)
-    struct CopyPathRange {
-        CopyPathRange() = default;
-        CopyPathRange(sk_sp<GrTextureProxy> srcProxy, int count)
-                : fSrcProxy(std::move(srcProxy)), fCount(count) {}
-        sk_sp<GrTextureProxy> fSrcProxy;
-        int fCount;
-    };
-
-    SkSTArray<4, CopyPathRange> fCopyPathRanges;
-    int fCurrCopyAtlasRangesIdx = 0;
-
-    // This is a list of coverage count atlas textures that have been invalidated due to us copying
-    // their paths into new 8-bit literal coverage atlases. Since copying is finished by the time
-    // we begin rendering new atlases, we can recycle these textures for the rendered atlases rather
-    // than allocating new texture objects upon instantiation.
-    SkSTArray<2, sk_sp<GrTexture>> fRecyclableAtlasTextures;
-
-public:
-    const GrTexture* testingOnly_frontCopyAtlasTexture() const;
-    const GrTexture* testingOnly_frontRenderedAtlasTexture() const;
 };
 
 inline void GrCCRenderedPathStats::statPath(const SkPath& path) {
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 901ca38..293d8c2 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -30,16 +30,15 @@
 }
 
 sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
-        const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
+        const GrCaps& caps, AllowCaching allowCaching) {
     return sk_sp<GrCoverageCountingPathRenderer>((IsSupported(caps))
-            ? new GrCoverageCountingPathRenderer(allowCaching, contextUniqueID)
+            ? new GrCoverageCountingPathRenderer(allowCaching)
             : nullptr);
 }
 
-GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching,
-                                                               uint32_t contextUniqueID) {
+GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching) {
     if (AllowCaching::kYes == allowCaching) {
-        fPathCache = skstd::make_unique<GrCCPathCache>(contextUniqueID);
+        fPathCache = skstd::make_unique<GrCCPathCache>();
     }
 }
 
@@ -189,16 +188,29 @@
 void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
                                               const uint32_t* opListIDs, int numOpListIDs,
                                               SkTArray<sk_sp<GrRenderTargetContext>>* out) {
-    using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
+    using DoCopiesToCache = GrCCDrawPathsOp::DoCopiesToCache;
     SkASSERT(!fFlushing);
     SkASSERT(fFlushingPaths.empty());
     SkDEBUGCODE(fFlushing = true);
 
-    if (fPathCache) {
-        fPathCache->doPreFlushProcessing();
+    // Dig up the stashed atlas from the previous flush (if any) so we can attempt to copy any
+    // reusable paths out of it and into the resource cache. We also need to clear its unique key.
+    sk_sp<GrTextureProxy> stashedAtlasProxy;
+    if (fStashedAtlasKey.isValid()) {
+        stashedAtlasProxy = onFlushRP->findOrCreateProxyByUniqueKey(fStashedAtlasKey,
+                                                                    GrCCAtlas::kTextureOrigin);
+        if (stashedAtlasProxy) {
+            // Instantiate the proxy so we can clear the underlying texture's unique key.
+            onFlushRP->instatiateProxy(stashedAtlasProxy.get());
+            SkASSERT(fStashedAtlasKey == stashedAtlasProxy->getUniqueKey());
+            onFlushRP->removeUniqueKeyFromProxy(stashedAtlasProxy.get());
+        } else {
+            fStashedAtlasKey.reset();  // Indicate there is no stashed atlas to copy from.
+        }
     }
 
     if (fPendingPaths.empty()) {
+        fStashedAtlasKey.reset();
         return;  // Nothing to draw.
     }
 
@@ -222,12 +234,13 @@
         fPendingPaths.erase(iter);
 
         for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
-            op->accountForOwnPaths(fPathCache.get(), onFlushRP, &specs);
+            op->accountForOwnPaths(fPathCache.get(), onFlushRP, fStashedAtlasKey, &specs);
         }
         for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
             clipsIter.second.accountForOwnPath(&specs);
         }
     }
+    fStashedAtlasKey.reset();
 
     if (specs.isEmpty()) {
         return;  // Nothing to draw.
@@ -237,10 +250,12 @@
     // copy them to cached atlas(es).
     int numCopies = specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kFillIdx] +
                     specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kStrokeIdx];
-    auto doCopies = DoCopiesToA8Coverage(numCopies > 100 ||
-                                         specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
-    if (numCopies && DoCopiesToA8Coverage::kNo == doCopies) {
-        specs.cancelCopies();
+    DoCopiesToCache doCopies = DoCopiesToCache(numCopies > 100 ||
+                                               specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
+    if (numCopies && DoCopiesToCache::kNo == doCopies) {
+        specs.convertCopiesToRenders();
+        SkASSERT(!specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kFillIdx]);
+        SkASSERT(!specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kStrokeIdx]);
     }
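
For scale: the pixel threshold above is 256 * 256 = 65,536 pixels. A flush that wants to copy 150 paths, or whose copy atlas would cover 512 x 512 = 262,144 pixels, proceeds with the A8 copies; a flush below both thresholds converts its copies back into ordinary renders via convertCopiesToRenders().
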
 
     auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, specs);
@@ -251,24 +266,21 @@
     // Lay out the atlas(es) and parse paths.
     for (const auto& flushingPaths : fFlushingPaths) {
         for (GrCCDrawPathsOp* op : flushingPaths->fDrawOps) {
-            op->setupResources(fPathCache.get(), onFlushRP, resources.get(), doCopies);
+            op->setupResources(onFlushRP, resources.get(), doCopies);
         }
         for (auto& clipsIter : flushingPaths->fClipPaths) {
             clipsIter.second.renderPathInAtlas(resources.get(), onFlushRP);
         }
     }
 
-    if (fPathCache) {
-        // Purge invalidated textures from previous atlases *before* calling finalize(). That way,
-        // the underlying textures objects can be freed up and reused for the next atlases.
-        fPathCache->purgeInvalidatedAtlasTextures(onFlushRP);
-    }
-
     // Allocate resources and then render the atlas(es).
-    if (!resources->finalize(onFlushRP, out)) {
+    if (!resources->finalize(onFlushRP, std::move(stashedAtlasProxy), out)) {
         return;
     }
 
+    // Verify the stashed atlas got released so its texture could be recycled.
+    SkASSERT(!stashedAtlasProxy);  // NOLINT(bugprone-use-after-move)
+
     // Commit flushing paths to the resources once they are successfully completed.
     for (auto& flushingPaths : fFlushingPaths) {
         SkASSERT(!flushingPaths->fFlushResources);
@@ -279,8 +291,15 @@
 void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
                                                int numOpListIDs) {
     SkASSERT(fFlushing);
+    SkASSERT(!fStashedAtlasKey.isValid());  // Should have been cleared in preFlush().
 
     if (!fFlushingPaths.empty()) {
+        // Note the stashed atlas's key for next flush, if any.
+        auto resources = fFlushingPaths.front()->fFlushResources.get();
+        if (resources && resources->hasStashedAtlas()) {
+            fStashedAtlasKey = resources->stashedAtlasKey();
+        }
+
         // In DDL mode these aren't guaranteed to be deleted so we must clear out the perFlush
         // resources manually.
         for (auto& flushingPaths : fFlushingPaths) {
@@ -291,13 +310,17 @@
         fFlushingPaths.reset();
     }
 
+    if (fPathCache) {
+        fPathCache->doPostFlushProcessing();
+    }
+
     SkDEBUGCODE(fFlushing = false);
 }
 
 void GrCoverageCountingPathRenderer::purgeCacheEntriesOlderThan(
-        GrProxyProvider* proxyProvider, const GrStdSteadyClock::time_point& purgeTime) {
+        const GrStdSteadyClock::time_point& purgeTime) {
     if (fPathCache) {
-        fPathCache->purgeEntriesOlderThan(proxyProvider, purgeTime);
+        fPathCache->purgeEntriesOlderThan(purgeTime);
     }
 }
 
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index b5fb321..554404d 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -34,8 +34,7 @@
         kYes = true
     };
 
-    static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching,
-                                                                   uint32_t contextUniqueID);
+    static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching);
 
     using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
 
@@ -66,7 +65,10 @@
                   SkTArray<sk_sp<GrRenderTargetContext>>* out) override;
     void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
 
-    void purgeCacheEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point&);
+    void purgeCacheEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime);
+
+    void testingOnly_drawPathDirectly(const DrawPathArgs&);
+    const GrUniqueKey& testingOnly_getStashedAtlasKey() const;
 
     // If a path spans more pixels than this, we need to crop it or else analytic AA can run out of
     // fp32 precision.
@@ -82,7 +84,7 @@
                                    float* inflationRadius = nullptr);
 
 private:
-    GrCoverageCountingPathRenderer(AllowCaching, uint32_t contextUniqueID);
+    GrCoverageCountingPathRenderer(AllowCaching);
 
     // GrPathRenderer overrides.
     StencilSupport onGetStencilSupport(const GrShape&) const override {
@@ -104,13 +106,9 @@
     SkSTArray<4, sk_sp<GrCCPerOpListPaths>> fFlushingPaths;
 
     std::unique_ptr<GrCCPathCache> fPathCache;
+    GrUniqueKey fStashedAtlasKey;
 
     SkDEBUGCODE(bool fFlushing = false);
-
-public:
-    void testingOnly_drawPathDirectly(const DrawPathArgs&);
-    const GrCCPerFlushResources* testingOnly_getCurrentFlushResources();
-    const GrCCPathCache* testingOnly_getPathCache() const;
 };
 
 #endif
diff --git a/src/gpu/mock/GrMockGpu.cpp b/src/gpu/mock/GrMockGpu.cpp
index b98af80..4f2c4cd 100644
--- a/src/gpu/mock/GrMockGpu.cpp
+++ b/src/gpu/mock/GrMockGpu.cpp
@@ -15,11 +15,7 @@
 
 int GrMockGpu::NextInternalTextureID() {
     static std::atomic<int> nextID{1};
-    int id;
-    do {
-        id = nextID.fetch_add(1);
-    } while (0 == id);  // Reserve 0 for an invalid ID.
-    return id;
+    return nextID++;
 }
 
 int GrMockGpu::NextExternalTextureID() {
diff --git a/tests/GrCCPRTest.cpp b/tests/GrCCPRTest.cpp
index 22c6112..a400985 100644
--- a/tests/GrCCPRTest.cpp
+++ b/tests/GrCCPRTest.cpp
@@ -18,13 +18,11 @@
 #include "GrRenderTargetContextPriv.h"
 #include "GrShape.h"
 #include "GrTexture.h"
-#include "SkExchange.h"
 #include "SkMatrix.h"
 #include "SkPathPriv.h"
 #include "SkRect.h"
 #include "sk_tool_utils.h"
 #include "ccpr/GrCoverageCountingPathRenderer.h"
-#include "ccpr/GrCCPathCache.h"
 #include "mock/GrMockTypes.h"
 
 #include <cmath>
@@ -58,7 +56,7 @@
 
 class CCPRPathDrawer {
 public:
-    CCPRPathDrawer(sk_sp<GrContext> ctx, skiatest::Reporter* reporter, bool doStroke)
+    CCPRPathDrawer(GrContext* ctx, skiatest::Reporter* reporter, bool doStroke)
             : fCtx(ctx)
             , fCCPR(fCtx->contextPriv().drawingManager()->getCoverageCountingPathRenderer())
             , fRTC(fCtx->contextPriv().makeDeferredRenderTargetContext(
@@ -74,19 +72,13 @@
         }
     }
 
-    GrContext* ctx() const { return fCtx.get(); }
+    GrContext* ctx() const { return fCtx; }
     GrCoverageCountingPathRenderer* ccpr() const { return fCCPR; }
 
     bool valid() const { return fCCPR && fRTC; }
     void clear() const { fRTC->clear(nullptr, SK_PMColor4fTRANSPARENT,
                                      GrRenderTargetContext::CanClearFullscreen::kYes); }
-    void destroyGrContext() {
-        SkASSERT(fRTC->unique());
-        SkASSERT(fCtx->unique());
-        fRTC.reset();
-        fCCPR = nullptr;
-        fCtx.reset();
-    }
+    void abandonGrContext() { fCtx = nullptr; fCCPR = nullptr; fRTC = nullptr; }
 
     void drawPath(const SkPath& path, const SkMatrix& matrix = SkMatrix::I()) const {
         SkASSERT(this->valid());
@@ -110,7 +102,7 @@
         }
 
         fCCPR->testingOnly_drawPathDirectly({
-                fCtx.get(), std::move(paint), &GrUserStencilSettings::kUnused, fRTC.get(), &noClip,
+                fCtx, std::move(paint), &GrUserStencilSettings::kUnused, fRTC.get(), &noClip,
                 &clipBounds, &matrix, &shape, GrAAType::kCoverage, false});
     }
 
@@ -130,7 +122,7 @@
     }
 
 private:
-    sk_sp<GrContext> fCtx;
+    GrContext* fCtx;
     GrCoverageCountingPathRenderer* fCCPR;
     sk_sp<GrRenderTargetContext> fRTC;
     const bool fDoStroke;
@@ -158,17 +150,17 @@
 
         this->customizeOptions(&mockOptions, &ctxOptions);
 
-        sk_sp<GrContext> mockContext = GrContext::MakeMock(&mockOptions, ctxOptions);
-        if (!mockContext) {
+        fMockContext = GrContext::MakeMock(&mockOptions, ctxOptions);
+        if (!fMockContext) {
             ERRORF(reporter, "could not create mock context");
             return;
         }
-        if (!mockContext->unique()) {
+        if (!fMockContext->unique()) {
             ERRORF(reporter, "mock context is not unique");
             return;
         }
 
-        CCPRPathDrawer ccpr(skstd::exchange(mockContext, nullptr), reporter, doStroke);
+        CCPRPathDrawer ccpr(fMockContext.get(), reporter, doStroke);
         if (!ccpr.valid()) {
             return;
         }
@@ -184,6 +176,7 @@
     virtual void customizeOptions(GrMockOptions*, GrContextOptions*) {}
     virtual void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) = 0;
 
+    sk_sp<GrContext> fMockContext;
     SkPath fPath;
 };
 
@@ -194,7 +187,7 @@
         test.run(reporter, true); \
     }
 
-class CCPR_cleanup : public CCPRTest {
+class GrCCPRTest_cleanup : public CCPRTest {
     void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
         REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
 
@@ -219,22 +212,22 @@
             ccpr.drawPath(fPath);
             ccpr.clipFullscreenRect(fPath);
         }
+        ccpr.abandonGrContext();
         REPORTER_ASSERT(reporter, !SkPathPriv::TestingOnly_unique(fPath));
-
-        ccpr.destroyGrContext();
+        fMockContext.reset();
         REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
     }
 };
-DEF_CCPR_TEST(CCPR_cleanup)
+DEF_CCPR_TEST(GrCCPRTest_cleanup)
 
-class CCPR_cleanupWithTexAllocFail : public CCPR_cleanup {
+class GrCCPRTest_cleanupWithTexAllocFail : public GrCCPRTest_cleanup {
     void customizeOptions(GrMockOptions* mockOptions, GrContextOptions*) override {
         mockOptions->fFailTextureAllocations = true;
     }
 };
-DEF_CCPR_TEST(CCPR_cleanupWithTexAllocFail)
+DEF_CCPR_TEST(GrCCPRTest_cleanupWithTexAllocFail)
 
-class CCPR_unregisterCulledOps : public CCPRTest {
+class GrCCPRTest_unregisterCulledOps : public CCPRTest {
     void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
         REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
 
@@ -250,12 +243,13 @@
         REPORTER_ASSERT(reporter, !SkPathPriv::TestingOnly_unique(fPath));
         ccpr.clear(); // Clear should delete the CCPR DrawPathsOp.
         REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
-        ccpr.destroyGrContext(); // Should not crash (DrawPathsOp should have unregistered itself).
+        ccpr.abandonGrContext();
+        fMockContext.reset(); // Should not crash (DrawPathsOp should have unregistered itself).
     }
 };
-DEF_CCPR_TEST(CCPR_unregisterCulledOps)
+DEF_CCPR_TEST(GrCCPRTest_unregisterCulledOps)
 
-class CCPR_parseEmptyPath : public CCPRTest {
+class GrCCPRTest_parseEmptyPath : public CCPRTest {
     void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
         REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
 
@@ -289,496 +283,119 @@
         ccpr.flush();
     }
 };
-DEF_CCPR_TEST(CCPR_parseEmptyPath)
-
-static int get_mock_texture_id(const GrTexture* texture) {
-    const GrBackendTexture& backingTexture = texture->getBackendTexture();
-    SkASSERT(GrBackendApi::kMock == backingTexture.backend());
-
-    if (!backingTexture.isValid()) {
-        return 0;
-    }
-
-    GrMockTextureInfo info;
-    backingTexture.getMockTextureInfo(&info);
-    return info.fID;
-}
-
-// Base class for cache path unit tests.
-class CCPRCacheTest : public CCPRTest {
-protected:
-    // Registers as an onFlush callback in order to snag the CCPR per-flush resources and note the
-    // texture IDs.
-    class RecordLastMockAtlasIDs : public GrOnFlushCallbackObject {
-    public:
-        RecordLastMockAtlasIDs(sk_sp<GrCoverageCountingPathRenderer> ccpr) : fCCPR(ccpr) {}
-
-        int lastCopyAtlasID() const { return fLastCopyAtlasID; }
-        int lastRenderedAtlasID() const { return fLastRenderedAtlasID; }
-
-        void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
-                      SkTArray<sk_sp<GrRenderTargetContext>>* out) override {
-            fLastRenderedAtlasID = fLastCopyAtlasID = 0;
-
-            const GrCCPerFlushResources* resources = fCCPR->testingOnly_getCurrentFlushResources();
-            if (!resources) {
-                return;
-            }
-
-            if (const GrTexture* tex = resources->testingOnly_frontCopyAtlasTexture()) {
-                fLastCopyAtlasID = get_mock_texture_id(tex);
-            }
-            if (const GrTexture* tex = resources->testingOnly_frontRenderedAtlasTexture()) {
-                fLastRenderedAtlasID = get_mock_texture_id(tex);
-            }
-        }
-
-        void postFlush(GrDeferredUploadToken, const uint32_t*, int) override {}
-
-    private:
-        sk_sp<GrCoverageCountingPathRenderer> fCCPR;
-        int fLastCopyAtlasID = 0;
-        int fLastRenderedAtlasID = 0;
-    };
-
-    CCPRCacheTest() {
-        static constexpr int primes[11] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31};
-
-        SkRandom rand;
-        for (size_t i = 0; i < SK_ARRAY_COUNT(fPaths); ++i) {
-            int numPts = rand.nextRangeU(GrShape::kMaxKeyFromDataVerbCnt + 1,
-                                         GrShape::kMaxKeyFromDataVerbCnt * 2);
-            int step;
-            do {
-                step = primes[rand.nextU() % SK_ARRAY_COUNT(primes)];
-            } while (step == numPts);
-            fPaths[i] = sk_tool_utils::make_star(SkRect::MakeLTRB(0,0,1,1), numPts, step);
-        }
-    }
-
-    void drawPathsAndFlush(CCPRPathDrawer& ccpr, const SkMatrix& m) {
-        this->drawPathsAndFlush(ccpr, &m, 1);
-    }
-    void drawPathsAndFlush(CCPRPathDrawer& ccpr, const SkMatrix* matrices, int numMatrices) {
-        // Draw all the paths.
-        for (size_t i = 0; i < SK_ARRAY_COUNT(fPaths); ++i) {
-            ccpr.drawPath(fPaths[i], matrices[i % numMatrices]);
-        }
-        // Re-draw a few paths, to test the case where a cache entry is hit more than once in a
-        // single flush.
-        SkRandom rand;
-        int duplicateIndices[10];
-        for (size_t i = 0; i < SK_ARRAY_COUNT(duplicateIndices); ++i) {
-            duplicateIndices[i] = rand.nextULessThan(SK_ARRAY_COUNT(fPaths));
-        }
-        for (size_t i = 0; i < SK_ARRAY_COUNT(duplicateIndices); ++i) {
-            for (size_t j = 0; j <= i; ++j) {
-                int idx = duplicateIndices[j];
-                ccpr.drawPath(fPaths[idx], matrices[idx % numMatrices]);
-            }
-        }
-        ccpr.flush();
-    }
-
-private:
-    void customizeOptions(GrMockOptions*, GrContextOptions* ctxOptions) override {
-        ctxOptions->fAllowPathMaskCaching = true;
-    }
-
-    void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) final {
-        RecordLastMockAtlasIDs atlasIDRecorder(sk_ref_sp(ccpr.ccpr()));
-        ccpr.ctx()->contextPriv().addOnFlushCallbackObject(&atlasIDRecorder);
-
-        this->onRun(reporter, ccpr, atlasIDRecorder);
-
-        ccpr.ctx()->contextPriv().testingOnly_flushAndRemoveOnFlushCallbackObject(&atlasIDRecorder);
-    }
-
-    virtual void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr,
-                       const RecordLastMockAtlasIDs&) = 0;
-
-protected:
-    SkPath fPaths[350];
-};
-
-// Ensures ccpr always reuses the same atlas texture in the animation use case.
-class CCPR_cache_animationAtlasReuse : public CCPRCacheTest {
-    void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr,
-               const RecordLastMockAtlasIDs& atlasIDRecorder) override {
-        SkMatrix m = SkMatrix::MakeTrans(kCanvasSize/2, kCanvasSize/2);
-        m.preScale(80, 80);
-        m.preTranslate(-.5,-.5);
-        this->drawPathsAndFlush(ccpr, m);
-
-        REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-        REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastRenderedAtlasID());
-        const int atlasID = atlasIDRecorder.lastRenderedAtlasID();
-
-        // Ensures we always reuse the same atlas texture in the animation use case.
-        for (int i = 0; i < 12; ++i) {
-            // 59 is prime, so we will hit every integer modulo 360 before repeating.
-            m.preRotate(59, .5, .5);
-
-            // Go twice. Paths have to get drawn twice with the same matrix before we cache their
-            // atlas. This makes sure that on the subsequent draw, after an atlas has been cached
-            // and is then invalidated since the matrix will change, that the same underlying
-            // texture object is still reused for the next atlas.
-            for (int j = 0; j < 2; ++j) {
-                this->drawPathsAndFlush(ccpr, m);
-                // Nothing should be copied to an 8-bit atlas after just two draws.
-                REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-                REPORTER_ASSERT(reporter, atlasIDRecorder.lastRenderedAtlasID() == atlasID);
-            }
-        }
-
-        // Do the last draw again. (On draw 3 they should get copied to an 8-bit atlas.)
-        this->drawPathsAndFlush(ccpr, m);
-        REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastCopyAtlasID());
-        REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
-
-        // Now double-check that everything continues to hit the cache as expected when the matrix
-        // doesn't change.
-        for (int i = 0; i < 10; ++i) {
-            this->drawPathsAndFlush(ccpr, m);
-            REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-            REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
-        }
-    }
-};
-DEF_CCPR_TEST(CCPR_cache_animationAtlasReuse)
-
-class CCPR_cache_recycleEntries : public CCPRCacheTest {
-    void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr,
-               const RecordLastMockAtlasIDs& atlasIDRecorder) override {
-        SkMatrix m = SkMatrix::MakeTrans(kCanvasSize/2, kCanvasSize/2);
-        m.preScale(80, 80);
-        m.preTranslate(-.5,-.5);
-
-        auto cache = ccpr.ccpr()->testingOnly_getPathCache();
-        REPORTER_ASSERT(reporter, cache);
-
-        const auto& lru = cache->testingOnly_getLRU();
-
-        SkTArray<const void*> expectedPtrs;
-
-        // Ensures we always reuse the same atlas texture in the animation use case.
-        for (int i = 0; i < 5; ++i) {
-            // 59 is prime, so we will hit every integer modulo 360 before repeating.
-            m.preRotate(59, .5, .5);
-
-            // Go twice. Paths have to get drawn twice with the same matrix before we cache their
-            // atlas.
-            for (int j = 0; j < 2; ++j) {
-                this->drawPathsAndFlush(ccpr, m);
-                // Nothing should be copied to an 8-bit atlas after just two draws.
-                REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-                REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastRenderedAtlasID());
-            }
-
-            int idx = 0;
-            for (const GrCCPathCacheEntry* entry : lru) {
-                if (0 == i) {
-                    expectedPtrs.push_back(entry);
-                } else {
-                    // The same pointer should have been recycled for the new matrix.
-                    REPORTER_ASSERT(reporter, entry == expectedPtrs[idx]);
-                }
-                ++idx;
-            }
-        }
-    }
-};
-DEF_CCPR_TEST(CCPR_cache_recycleEntries)
-
-// Ensures mostly-visible paths get their full mask cached.
-class CCPR_cache_mostlyVisible : public CCPRCacheTest {
-    void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr,
-               const RecordLastMockAtlasIDs& atlasIDRecorder) override {
-        SkMatrix matrices[3] = {
-            SkMatrix::MakeScale(kCanvasSize/2, kCanvasSize/2), // Fully visible.
-            SkMatrix::MakeScale(kCanvasSize * 1.25, kCanvasSize * 1.25), // Mostly visible.
-            SkMatrix::MakeScale(kCanvasSize * 1.5, kCanvasSize * 1.5), // Mostly NOT visible.
-        };
-
-        for (int i = 0; i < 10; ++i) {
-            this->drawPathsAndFlush(ccpr, matrices, 3);
-            if (2 == i) {
-                // The mostly-visible paths should still get cached.
-                REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastCopyAtlasID());
-            } else {
-                REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-            }
-            // Ensure mostly NOT-visible paths never get cached.
-            REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastRenderedAtlasID());
-        }
-
-        // Clear the path cache.
-        this->drawPathsAndFlush(ccpr, SkMatrix::I());
-
-        // Now only draw the fully/mostly visible ones.
-        for (int i = 0; i < 2; ++i) {
-            this->drawPathsAndFlush(ccpr, matrices, 2);
-            REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-            REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastRenderedAtlasID());
-        }
-
-        // On draw 3 they should get copied to an 8-bit atlas.
-        this->drawPathsAndFlush(ccpr, matrices, 2);
-        REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastCopyAtlasID());
-        REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
-
-        for (int i = 0; i < 10; ++i) {
-            this->drawPathsAndFlush(ccpr, matrices, 2);
-            REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-            REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
-        }
-
-        // Draw a different part of the path to ensure the full mask was cached.
-        matrices[1].postTranslate(SkScalarFloorToInt(kCanvasSize * -.25f),
-                                  SkScalarFloorToInt(kCanvasSize * -.25f));
-        for (int i = 0; i < 10; ++i) {
-            this->drawPathsAndFlush(ccpr, matrices, 2);
-            REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-            REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
-        }
-    }
-};
-DEF_CCPR_TEST(CCPR_cache_mostlyVisible)
-
-// Ensures GrContext::performDeferredCleanup works.
-class CCPR_cache_deferredCleanup : public CCPRCacheTest {
-    void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr,
-               const RecordLastMockAtlasIDs& atlasIDRecorder) override {
-        SkMatrix m = SkMatrix::MakeScale(20, 20);
-        int lastRenderedAtlasID = 0;
-
-        for (int i = 0; i < 5; ++i) {
-            this->drawPathsAndFlush(ccpr, m);
-            REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-            REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastRenderedAtlasID());
-            int renderedAtlasID = atlasIDRecorder.lastRenderedAtlasID();
-            REPORTER_ASSERT(reporter, renderedAtlasID != lastRenderedAtlasID);
-            lastRenderedAtlasID = renderedAtlasID;
-
-            this->drawPathsAndFlush(ccpr, m);
-            REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-            REPORTER_ASSERT(reporter, lastRenderedAtlasID == atlasIDRecorder.lastRenderedAtlasID());
-
-            // On draw 3 they should get copied to an 8-bit atlas.
-            this->drawPathsAndFlush(ccpr, m);
-            REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastCopyAtlasID());
-            REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
-
-            for (int i = 0; i < 10; ++i) {
-                this->drawPathsAndFlush(ccpr, m);
-                REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-                REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
-            }
-
-            ccpr.ctx()->performDeferredCleanup(std::chrono::milliseconds(0));
-        }
-    }
-};
-DEF_CCPR_TEST(CCPR_cache_deferredCleanup)
-
-// Verifies the cache/hash table internals.
-class CCPR_cache_hashTable : public CCPRCacheTest {
-    void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr,
-               const RecordLastMockAtlasIDs& atlasIDRecorder) override {
-        using CoverageType = GrCCAtlas::CoverageType;
-        SkMatrix m = SkMatrix::MakeScale(20, 20);
-
-        for (int i = 0; i < 5; ++i) {
-            this->drawPathsAndFlush(ccpr, m);
-            if (2 == i) {
-                REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastCopyAtlasID());
-            } else {
-                REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-            }
-            if (i < 2) {
-                REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastRenderedAtlasID());
-            } else {
-                REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
-            }
-
-            auto cache = ccpr.ccpr()->testingOnly_getPathCache();
-            REPORTER_ASSERT(reporter, cache);
-
-            const auto& hash = cache->testingOnly_getHashTable();
-            const auto& lru = cache->testingOnly_getLRU();
-            int count = 0;
-            for (GrCCPathCacheEntry* entry : lru) {
-                auto* node = hash.find(entry->cacheKey());
-                REPORTER_ASSERT(reporter, node);
-                REPORTER_ASSERT(reporter, node->entry() == entry);
-                REPORTER_ASSERT(reporter, 0 == entry->testingOnly_peekOnFlushRefCnt());
-                if (0 == i) {
-                    REPORTER_ASSERT(reporter, !entry->cachedAtlas());
-                } else {
-                    const GrCCCachedAtlas* cachedAtlas = entry->cachedAtlas();
-                    REPORTER_ASSERT(reporter, cachedAtlas);
-                    if (1 == i) {
-                        REPORTER_ASSERT(reporter, CoverageType::kFP16_CoverageCount
-                                                          == cachedAtlas->coverageType());
-                    } else {
-                        REPORTER_ASSERT(reporter, CoverageType::kA8_LiteralCoverage
-                                                          == cachedAtlas->coverageType());
-                    }
-                    REPORTER_ASSERT(reporter, cachedAtlas->textureKey().isValid());
-                    // The actual proxy should not be held past the end of a flush.
-                    REPORTER_ASSERT(reporter, !cachedAtlas->getOnFlushProxy());
-                    REPORTER_ASSERT(reporter, 0 == cachedAtlas->testingOnly_peekOnFlushRefCnt());
-                }
-                ++count;
-            }
-            REPORTER_ASSERT(reporter, hash.count() == count);
-        }
-    }
-};
-DEF_CCPR_TEST(CCPR_cache_hashTable)
-
-// Ensures paths get cached even when using a sporadic flushing pattern and drawing out of order
-// (a la Chrome tiles).
-class CCPR_cache_multiFlush : public CCPRCacheTest {
-    void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr,
-               const RecordLastMockAtlasIDs& atlasIDRecorder) override {
-        static constexpr int kNumPaths = SK_ARRAY_COUNT(fPaths);
-        static constexpr int kBigPrimes[] = {
-                9323, 11059, 22993, 38749, 45127, 53147, 64853, 77969, 83269, 99989};
-
-        SkRandom rand;
-        SkMatrix m = SkMatrix::I();
-
-        for (size_t i = 0; i < SK_ARRAY_COUNT(kBigPrimes); ++i) {
-            int prime = kBigPrimes[i];
-            int endPathIdx = (int)rand.nextULessThan(kNumPaths);
-            int pathIdx = endPathIdx;
-            int nextFlush = rand.nextRangeU(1, 47);
-            for (int j = 0; j < kNumPaths; ++j) {
-                pathIdx = (pathIdx + prime) % kNumPaths;
-                int repeat = rand.nextRangeU(1, 3);
-                for (int k = 0; k < repeat; ++k) {
-                    ccpr.drawPath(fPaths[pathIdx], m);
-                }
-                if (nextFlush == j) {
-                    ccpr.flush();
-                    // The paths are small enough that we should never copy to an A8 atlas.
-                    REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-                    if (i < 2) {
-                        REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastRenderedAtlasID());
-                    } else {
-                        REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
-                    }
-                    nextFlush = SkTMin(j + (int)rand.nextRangeU(1, 29), kNumPaths - 1);
-                }
-            }
-            SkASSERT(endPathIdx == pathIdx % kNumPaths);
-        }
-    }
-};
-DEF_CCPR_TEST(CCPR_cache_multiFlush)
+DEF_CCPR_TEST(GrCCPRTest_parseEmptyPath)
 
 // This test exercises CCPR's cache capabilities by drawing many paths with two different
 // transformation matrices. We then vary the matrices independently by whole and partial pixels,
 // and verify the caching behaved as expected.
-class CCPR_cache_partialInvalidate : public CCPRCacheTest {
+class GrCCPRTest_cache : public CCPRTest {
     void customizeOptions(GrMockOptions*, GrContextOptions* ctxOptions) override {
         ctxOptions->fAllowPathMaskCaching = true;
     }
 
-    static constexpr int kPathSize = 4;
+    void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
+        static constexpr int kPathSize = 20;
+        SkRandom rand;
 
-    void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr,
-               const RecordLastMockAtlasIDs& atlasIDRecorder) override {
+        SkPath paths[300];
+        int primes[11] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31};
+        for (size_t i = 0; i < SK_ARRAY_COUNT(paths); ++i) {
+            int numPts = rand.nextRangeU(GrShape::kMaxKeyFromDataVerbCnt + 1,
+                                         GrShape::kMaxKeyFromDataVerbCnt * 2);
+            paths[i] = sk_tool_utils::make_star(SkRect::MakeIWH(kPathSize, kPathSize), numPts,
+                                                primes[rand.nextU() % SK_ARRAY_COUNT(primes)]);
+        }
+
         SkMatrix matrices[2] = {
             SkMatrix::MakeTrans(5, 5),
             SkMatrix::MakeTrans(kCanvasSize - kPathSize - 5, kCanvasSize - kPathSize - 5)
         };
-        matrices[0].preScale(kPathSize, kPathSize);
-        matrices[1].preScale(kPathSize, kPathSize);
 
-        int firstAtlasID = 0;
+        int firstAtlasID = -1;
 
-        for (int iterIdx = 0; iterIdx < 4*3*2; ++iterIdx) {
-            this->drawPathsAndFlush(ccpr, matrices, 2);
+        for (int iterIdx = 0; iterIdx < 10; ++iterIdx) {
+            static constexpr int kNumHitsBeforeStash = 2;
+            static const GrUniqueKey gInvalidUniqueKey;
+
+            // Draw all the paths then flush. Repeat until a new stash occurs.
+            const GrUniqueKey* stashedAtlasKey = &gInvalidUniqueKey;
+            for (int j = 0; j < kNumHitsBeforeStash; ++j) {
+                // Nothing should be stashed until the paths' hit count reaches kNumHitsBeforeStash.
+                REPORTER_ASSERT(reporter, !stashedAtlasKey->isValid());
+
+                for (size_t i = 0; i < SK_ARRAY_COUNT(paths); ++i) {
+                    ccpr.drawPath(paths[i], matrices[i % 2]);
+                }
+                ccpr.flush();
+
+                stashedAtlasKey = &ccpr.ccpr()->testingOnly_getStashedAtlasKey();
+            }
+
+            // Figure out the mock backend ID of the atlas texture stashed away by CCPR.
+            GrMockTextureInfo stashedAtlasInfo;
+            stashedAtlasInfo.fID = -1;
+            if (stashedAtlasKey->isValid()) {
+                GrResourceProvider* rp = ccpr.ctx()->contextPriv().resourceProvider();
+                sk_sp<GrSurface> stashedAtlas = rp->findByUniqueKey<GrSurface>(*stashedAtlasKey);
+                REPORTER_ASSERT(reporter, stashedAtlas);
+                if (stashedAtlas) {
+                    const auto& backendTexture = stashedAtlas->asTexture()->getBackendTexture();
+                    backendTexture.getMockTextureInfo(&stashedAtlasInfo);
+                }
+            }
 
             if (0 == iterIdx) {
                 // First iteration: just note the ID of the stashed atlas and continue.
-                firstAtlasID = atlasIDRecorder.lastRenderedAtlasID();
-                REPORTER_ASSERT(reporter, 0 != firstAtlasID);
+                REPORTER_ASSERT(reporter, stashedAtlasKey->isValid());
+                firstAtlasID = stashedAtlasInfo.fID;
                 continue;
             }
 
-            int testIdx = (iterIdx/2) % 3;
-            int repetitionIdx = iterIdx % 2;
-            switch (testIdx) {
-                case 0:
-                    if (0 == repetitionIdx) {
-                        // This is the big test. New paths were drawn twice last round. On hit 2
-                        // (last time), 'firstAtlasID' was cached as a 16-bit atlas. Now, on hit 3,
-                        // these paths should be copied out of 'firstAtlasID', and into an A8 atlas.
-                        // THEN: we should recycle 'firstAtlasID' and reuse that same texture to
-                        // render the new masks.
-                        REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastCopyAtlasID());
-                        REPORTER_ASSERT(reporter,
-                                        atlasIDRecorder.lastRenderedAtlasID() == firstAtlasID);
-                    } else {
-                        REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-                        // This is hit 2 for the new masks. Next time they will be copied to an A8
-                        // atlas.
-                        REPORTER_ASSERT(reporter,
-                                        atlasIDRecorder.lastRenderedAtlasID() == firstAtlasID);
-                    }
-
-                    if (1 == repetitionIdx) {
-                        // Integer translates: all path masks stay valid.
-                        matrices[0].preTranslate(-1, -1);
-                        matrices[1].preTranslate(1, 1);
-                    }
-                    break;
-
+            switch (iterIdx % 3) {
                 case 1:
-                    if (0 == repetitionIdx) {
-                        // New paths were drawn twice last round. The third hit (now) they should be
-                        // copied to an A8 atlas.
-                        REPORTER_ASSERT(reporter, 0 != atlasIDRecorder.lastCopyAtlasID());
-                    } else {
-                        REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
-                    }
-
                     // This draw should have gotten 100% cache hits; we only did integer translates
-                    // last time (or none if it was the first flush). Therefore, everything should
-                    // have been cached.
-                    REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastRenderedAtlasID());
+                    // last time (or none if it was the first flush). Therefore, no atlas should
+                    // have been stashed away.
+                    REPORTER_ASSERT(reporter, !stashedAtlasKey->isValid());
 
-                    if (1 == repetitionIdx) {
-                        // Invalidate even path masks.
-                        matrices[0].preTranslate(1.6f, 1.4f);
-                    }
+                    // Invalidate even path masks.
+                    matrices[0].preTranslate(1.6f, 1.4f);
                     break;
 
                 case 2:
-                    // No new masks to copy from last time; it had 100% cache hits.
-                    REPORTER_ASSERT(reporter, 0 == atlasIDRecorder.lastCopyAtlasID());
+                    // Even path masks were invalidated last iteration by a subpixel translate. They
+                    // should have been re-rendered this time and stashed away in the CCPR atlas.
+                    REPORTER_ASSERT(reporter, stashedAtlasKey->isValid());
 
-                    // Even path masks were invalidated last iteration by a subpixel translate.
-                    // They should have been re-rendered this time in the original 'firstAtlasID'
-                    // texture.
-                    REPORTER_ASSERT(reporter,
-                                    atlasIDRecorder.lastRenderedAtlasID() == firstAtlasID);
+                    // 'firstAtlasID' should be kept as a scratch texture in the resource cache.
+                    REPORTER_ASSERT(reporter, stashedAtlasInfo.fID == firstAtlasID);
 
-                    if (1 == repetitionIdx) {
-                        // Invalidate odd path masks.
-                        matrices[1].preTranslate(-1.4f, -1.6f);
-                    }
+                    // Invalidate odd path masks.
+                    matrices[1].preTranslate(-1.4f, -1.6f);
+                    break;
+
+                case 0:
+                    // Odd path masks were invalidated last iteration by a subpixel translate. They
+                    // should have been re-rendered this time and stashed away in the CCPR atlas.
+                    REPORTER_ASSERT(reporter, stashedAtlasKey->isValid());
+
+                    // 'firstAtlasID' identifies the texture that got stashed away last time (assuming
+                    // no assertion failures). So if it also got stashed this time, it means we
+                    // first copied the even paths out of it, then recycled the exact same texture
+                    // to render the odd paths. This is the expected behavior.
+                    REPORTER_ASSERT(reporter, stashedAtlasInfo.fID == firstAtlasID);
+
+                    // Integer translates: all path masks stay valid.
+                    matrices[0].preTranslate(-1, -1);
+                    matrices[1].preTranslate(1, 1);
                     break;
             }
         }
     }
 };
-DEF_CCPR_TEST(CCPR_cache_partialInvalidate)
+DEF_CCPR_TEST(GrCCPRTest_cache)
 
-class CCPR_unrefPerOpListPathsBeforeOps : public CCPRTest {
+class GrCCPRTest_unrefPerOpListPathsBeforeOps : public CCPRTest {
     void onRun(skiatest::Reporter* reporter, CCPRPathDrawer& ccpr) override {
         REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
         for (int i = 0; i < 10000; ++i) {
@@ -796,7 +413,7 @@
         REPORTER_ASSERT(reporter, SkPathPriv::TestingOnly_unique(fPath));
     }
 };
-DEF_CCPR_TEST(CCPR_unrefPerOpListPathsBeforeOps)
+DEF_CCPR_TEST(GrCCPRTest_unrefPerOpListPathsBeforeOps)
 
 class CCPRRenderingTest {
 public:
@@ -804,7 +421,7 @@
         if (!ctx->contextPriv().drawingManager()->getCoverageCountingPathRenderer()) {
             return; // CCPR is not enabled on this GPU.
         }
-        CCPRPathDrawer ccpr(sk_ref_sp(ctx), reporter, doStroke);
+        CCPRPathDrawer ccpr(ctx, reporter, doStroke);
         if (!ccpr.valid()) {
             return;
         }
@@ -824,7 +441,7 @@
         test.run(reporter, ctxInfo.grContext(), true); \
     }
 
-class CCPR_busyPath : public CCPRRenderingTest {
+class GrCCPRTest_busyPath : public CCPRRenderingTest {
     void onRun(skiatest::Reporter* reporter, const CCPRPathDrawer& ccpr) const override {
         static constexpr int kNumBusyVerbs = 1 << 17;
         ccpr.clear();
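The hunk below cuts off before the path construction, so as a hedged illustration only: a "busy" path is simply one with an enormous verb count, and could be built with a loop like the following. The zig-zag geometry here is an assumption; only kNumBusyVerbs (and kCanvasSize from the cache test above) come from the source.

    // Hypothetical sketch: one moveTo plus kNumBusyVerbs - 1 lineTo verbs.
    SkPath busyPath;
    busyPath.moveTo(0, 0);
    for (int i = 1; i < kNumBusyVerbs; ++i) {
        // Alternate y between 0 and 1 so every segment is a real, non-degenerate verb.
        busyPath.lineTo(SkIntToScalar(i) / kNumBusyVerbs * kCanvasSize, SkIntToScalar(i % 2));
    }
    // The test then draws busyPath through the CCPRPathDrawer.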
@@ -842,4 +459,4 @@
                       // your platform's GrGLCaps.
     }
 };
-DEF_CCPR_RENDERING_TEST(CCPR_busyPath)
+DEF_CCPR_RENDERING_TEST(GrCCPRTest_busyPath)
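Condensing the mock-backend plumbing from the restored GrCCPRTest_cache above: resolving "which texture did CCPR stash?" is a three-step lookup (unique key -> GrSurface -> mock texture info). A sketch of that logic factored into a helper, under the same assumptions the test makes (an fID of -1 meaning "nothing stashed" is this sketch's convention, not an API guarantee):

    // Returns the mock-backend ID of the stashed atlas texture, or -1 if no
    // atlas was stashed by the last flush. Mirrors the inline lookup in
    // GrCCPRTest_cache::onRun.
    static int stashed_atlas_mock_id(CCPRPathDrawer& ccpr, skiatest::Reporter* reporter) {
        const GrUniqueKey& key = ccpr.ccpr()->testingOnly_getStashedAtlasKey();
        if (!key.isValid()) {
            return -1;  // nothing was stashed
        }
        GrResourceProvider* rp = ccpr.ctx()->contextPriv().resourceProvider();
        sk_sp<GrSurface> stashedAtlas = rp->findByUniqueKey<GrSurface>(key);
        REPORTER_ASSERT(reporter, stashedAtlas);
        if (!stashedAtlas) {
            return -1;
        }
        GrMockTextureInfo info;
        info.fID = -1;
        stashedAtlas->asTexture()->getBackendTexture().getMockTextureInfo(&info);
        return info.fID;
    }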
diff --git a/tools/gpu/GrTest.cpp b/tools/gpu/GrTest.cpp
index 43b9b38..89dbaa5 100644
--- a/tools/gpu/GrTest.cpp
+++ b/tools/gpu/GrTest.cpp
@@ -27,7 +27,6 @@
 #include "SkString.h"
 #include "SkTo.h"
 #include "ccpr/GrCoverageCountingPathRenderer.h"
-#include "ccpr/GrCCPathCache.h"
 #include "ops/GrMeshDrawOp.h"
 #include "text/GrGlyphCache.h"
 #include "text/GrTextBlobCache.h"
@@ -279,55 +278,10 @@
     this->onDrawPath(args);
 }
 
-const GrCCPerFlushResources*
-GrCoverageCountingPathRenderer::testingOnly_getCurrentFlushResources() {
-    SkASSERT(fFlushing);
-    if (fFlushingPaths.empty()) {
-        return nullptr;
-    }
-    // All pending paths should share the same resources.
-    const GrCCPerFlushResources* resources = fFlushingPaths.front()->fFlushResources.get();
-#ifdef SK_DEBUG
-    for (const auto& flushingPaths : fFlushingPaths) {
-        SkASSERT(flushingPaths->fFlushResources.get() == resources);
-    }
-#endif
-    return resources;
+const GrUniqueKey& GrCoverageCountingPathRenderer::testingOnly_getStashedAtlasKey() const {
+    return fStashedAtlasKey;
 }
 
-const GrCCPathCache* GrCoverageCountingPathRenderer::testingOnly_getPathCache() const {
-    return fPathCache.get();
-}
-
-const GrTexture* GrCCPerFlushResources::testingOnly_frontCopyAtlasTexture() const {
-    if (fCopyAtlasStack.empty()) {
-        return nullptr;
-    }
-    const GrTextureProxy* proxy = fCopyAtlasStack.front().textureProxy();
-    return (proxy) ? proxy->peekTexture() : nullptr;
-}
-
-const GrTexture* GrCCPerFlushResources::testingOnly_frontRenderedAtlasTexture() const {
-    if (fRenderedAtlasStack.empty()) {
-        return nullptr;
-    }
-    const GrTextureProxy* proxy = fRenderedAtlasStack.front().textureProxy();
-    return (proxy) ? proxy->peekTexture() : nullptr;
-}
-
-const SkTHashTable<GrCCPathCache::HashNode, const GrCCPathCache::Key&>&
-GrCCPathCache::testingOnly_getHashTable() const {
-    return fHashTable;
-}
-
-const SkTInternalLList<GrCCPathCacheEntry>& GrCCPathCache::testingOnly_getLRU() const {
-    return fLRU;
-}
-
-int GrCCPathCacheEntry::testingOnly_peekOnFlushRefCnt() const { return fOnFlushRefCnt; }
-
-int GrCCCachedAtlas::testingOnly_peekOnFlushRefCnt() const { return fOnFlushRefCnt; }
-
 //////////////////////////////////////////////////////////////////////////////
 
 #define DRAW_OP_TEST_EXTERN(Op) \
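The testingOnly_getStashedAtlasKey() definition restored in the hunk above needs a matching declaration next to fStashedAtlasKey; presumably both live in GrCoverageCountingPathRenderer.h. A sketch of that pairing (member placement and surrounding class contents are assumptions; base classes and other members elided):

    class GrCoverageCountingPathRenderer /* : bases elided */ {
    public:
        // Testing-only hook restored by this revert: exposes the unique key of
        // the atlas stashed at the end of the previous flush. The key is
        // invalid (!isValid()) when no atlas is currently stashed.
        const GrUniqueKey& testingOnly_getStashedAtlasKey() const;

    private:
        GrUniqueKey fStashedAtlasKey;
    };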
diff --git a/tools/sk_tool_utils.cpp b/tools/sk_tool_utils.cpp
index a50dffc..374d00f 100644
--- a/tools/sk_tool_utils.cpp
+++ b/tools/sk_tool_utils.cpp
@@ -175,12 +175,11 @@
 }
 
 SkPath make_star(const SkRect& bounds, int numPts, int step) {
-    SkASSERT(numPts != step);
     SkPath path;
     path.setFillType(SkPath::kEvenOdd_FillType);
     path.moveTo(0,-1);
     for (int i = 1; i < numPts; ++i) {
-        int idx = i*step % numPts;
+        int idx = i*step;
         SkScalar theta = idx * 2*SK_ScalarPI/numPts + SK_ScalarPI/2;
         SkScalar x = SkScalarCos(theta);
         SkScalar y = -SkScalarSin(theta);
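One note on the sk_tool_utils hunk above: dropping the '% numPts' is mathematically harmless, because theta = idx * 2*pi/numPts is periodic in idx with period numPts, so SkScalarCos/SkScalarSin produce the same vertex either way (up to float precision at very large idx). The deleted SkASSERT only guarded the degenerate numPts == step case, in which every idx is a multiple of numPts and all points collapse onto a single vertex. A tiny standalone check of the equivalence for the classic five-pointed star:

    #include <cmath>
    #include <cstdio>

    int main() {
        const double kPi = 3.141592653589793;
        const int numPts = 5, step = 2;  // five points, skipping every other vertex
        for (int i = 1; i < numPts; ++i) {
            double reduced = (i * step % numPts) * 2 * kPi / numPts;  // with the modulo
            double raw     = (i * step)          * 2 * kPi / numPts;  // as restored by this revert
            // The two angles differ by whole turns, so cos/sin agree.
            printf("i=%d  cos %+.6f vs %+.6f  sin %+.6f vs %+.6f\n",
                   i, cos(reduced), cos(raw), sin(reduced), sin(raw));
        }
        return 0;
    }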