ccpr: Rework the path cache to support sporadic flushing

Removes the notion of a stashed atlas that we store from the previous
flush. Now we just cache every atlas we ever render. Cached atlases
can be either 16-bit or 8-bit.

The "reuse" and "animation" cases should both behave exactly the same
as before: Where before we would copy from the stashed atlas to 8-bit
atlases, we now copy from a cached 16-bit atlas and then invalidate
it. Where before we would recycle the stashed atlas's backing texture
object, we now recycle this same texture object from an invalidated
16-bit cached atlas.

The main difference is that cases like tiled rendering now work. If
you draw your whole scene in one flush, you still get one big 16-bit
cached atlas, just like the "stashed atlas" implementation. But if you
draw your scene in tiles, you now get lots of little cached 16-bit
atlases, which can be reused and eventually copied to 8-bit atlases.
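
Roughly, the per-path decision now looks like this (a minimal sketch;
the types and names below are illustrative stand-ins, not the actual
CCPR classes):

    // Toy model of what accountForOwnPaths()/setupResources() decide for
    // each cached path. Hypothetical types; not the real Skia API.
    #include <cassert>

    enum class CoverageType { kFP16_CoverageCount, kA8_LiteralCoverage };

    struct CachedAtlas { CoverageType coverageType; };

    enum class Action { kDrawFromA8, kCopyFP16ToA8ThenDraw, kRenderIntoNewFP16 };

    Action decideAction(const CachedAtlas* atlas) {
        if (!atlas) {
            // Not cached yet: render into a 16-bit coverage count atlas and cache it.
            return Action::kRenderIntoNewFP16;
        }
        if (CoverageType::kA8_LiteralCoverage == atlas->coverageType) {
            // Already in a permanent 8-bit atlas: just draw from it.
            return Action::kDrawFromA8;
        }
        // Reused from a cached 16-bit atlas: copy into an 8-bit atlas and
        // invalidate the 16-bit pixels (its backing texture can then be recycled).
        return Action::kCopyFP16ToA8ThenDraw;
    }

    int main() {
        CachedAtlas a8{CoverageType::kA8_LiteralCoverage};
        CachedAtlas fp16{CoverageType::kFP16_CoverageCount};
        assert(decideAction(&a8) == Action::kDrawFromA8);
        assert(decideAction(&fp16) == Action::kCopyFP16ToA8ThenDraw);
        assert(decideAction(nullptr) == Action::kRenderIntoNewFP16);
        return 0;
    }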

Bug: skia:8462
Change-Id: Ibae65febb948230aaaf1f1361eef9c8f06ebef18
Reviewed-on: https://skia-review.googlesource.com/c/179991
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 0d42653..14a65fa 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -317,7 +317,7 @@
     fResourceCache->purgeResourcesNotUsedSince(purgeTime);
 
     if (auto ccpr = fDrawingManager->getCoverageCountingPathRenderer()) {
-        ccpr->purgeCacheEntriesOlderThan(purgeTime);
+        ccpr->purgeCacheEntriesOlderThan(fProxyProvider, purgeTime);
     }
 
     fTextBlobCache->purgeStaleBlobs();
diff --git a/src/gpu/GrPathRendererChain.cpp b/src/gpu/GrPathRendererChain.cpp
index a6ad513..6e28c5d 100644
--- a/src/gpu/GrPathRendererChain.cpp
+++ b/src/gpu/GrPathRendererChain.cpp
@@ -40,7 +40,8 @@
     if (options.fGpuPathRenderers & GpuPathRenderers::kCoverageCounting) {
         using AllowCaching = GrCoverageCountingPathRenderer::AllowCaching;
         if (auto ccpr = GrCoverageCountingPathRenderer::CreateIfSupported(
-                                caps, AllowCaching(options.fAllowPathMaskCaching))) {
+                                caps, AllowCaching(options.fAllowPathMaskCaching),
+                                context->uniqueID())) {
             fCoverageCountingPathRenderer = ccpr.get();
             context->contextPriv().addOnFlushCallbackObject(fCoverageCountingPathRenderer);
             fChain.push_back(std::move(ccpr));
diff --git a/src/gpu/ccpr/GrCCAtlas.cpp b/src/gpu/ccpr/GrCCAtlas.cpp
index a41eb38..4d147fd 100644
--- a/src/gpu/ccpr/GrCCAtlas.cpp
+++ b/src/gpu/ccpr/GrCCAtlas.cpp
@@ -16,6 +16,7 @@
 #include "GrTextureProxy.h"
 #include "SkMakeUnique.h"
 #include "SkMathPriv.h"
+#include "ccpr/GrCCPathCache.h"
 #include <atomic>
 
 class GrCCAtlas::Node {
@@ -47,8 +48,9 @@
     GrRectanizerSkyline fRectanizer;
 };
 
-GrCCAtlas::GrCCAtlas(GrPixelConfig pixelConfig, const Specs& specs, const GrCaps& caps)
-        : fMaxTextureSize(SkTMax(SkTMax(specs.fMinHeight, specs.fMinWidth),
+GrCCAtlas::GrCCAtlas(CoverageType coverageType, const Specs& specs, const GrCaps& caps)
+        : fCoverageType(coverageType)
+        , fMaxTextureSize(SkTMax(SkTMax(specs.fMinHeight, specs.fMinWidth),
                                  specs.fMaxPreferredTextureSize)) {
     // Caller should have cropped any paths to the destination render target instead of asking for
     // an atlas larger than maxRenderTargetSize.
@@ -73,12 +75,12 @@
 
     fTopNode = skstd::make_unique<Node>(nullptr, 0, 0, fWidth, fHeight);
 
-    // TODO: don't have this rely on the GrPixelConfig
-    GrSRGBEncoded srgbEncoded = GrSRGBEncoded::kNo;
-    GrColorType colorType = GrPixelConfigToColorTypeAndEncoding(pixelConfig, &srgbEncoded);
-
+    GrColorType colorType = (CoverageType::kFP16_CoverageCount == fCoverageType)
+            ? GrColorType::kAlpha_F16 : GrColorType::kAlpha_8;
     const GrBackendFormat format =
-            caps.getBackendFormatFromGrColorType(colorType, srgbEncoded);
+            caps.getBackendFormatFromGrColorType(colorType, GrSRGBEncoded::kNo);
+    GrPixelConfig pixelConfig = (CoverageType::kFP16_CoverageCount == fCoverageType)
+            ? kAlpha_half_GrPixelConfig : kAlpha_8_GrPixelConfig;
 
     fTextureProxy = GrProxyProvider::MakeFullyLazyProxy(
             [this, pixelConfig](GrResourceProvider* resourceProvider) {
@@ -159,27 +161,23 @@
     return nextID++;
 }
 
-const GrUniqueKey& GrCCAtlas::getOrAssignUniqueKey(GrOnFlushResourceProvider* onFlushRP) {
-    static const GrUniqueKey::Domain kAtlasDomain = GrUniqueKey::GenerateDomain();
+sk_sp<GrCCCachedAtlas> GrCCAtlas::refOrMakeCachedAtlas(GrOnFlushResourceProvider* onFlushRP) {
+    if (!fCachedAtlas) {
+        static const GrUniqueKey::Domain kAtlasDomain = GrUniqueKey::GenerateDomain();
 
-    if (!fUniqueKey.isValid()) {
-        GrUniqueKey::Builder builder(&fUniqueKey, kAtlasDomain, 1, "CCPR Atlas");
+        GrUniqueKey atlasUniqueKey;
+        GrUniqueKey::Builder builder(&atlasUniqueKey, kAtlasDomain, 1, "CCPR Atlas");
         builder[0] = next_atlas_unique_id();
         builder.finish();
 
-        if (fTextureProxy->isInstantiated()) {
-            onFlushRP->assignUniqueKeyToProxy(fUniqueKey, fTextureProxy.get());
-        }
-    }
-    return fUniqueKey;
-}
+        onFlushRP->assignUniqueKeyToProxy(atlasUniqueKey, fTextureProxy.get());
 
-sk_sp<GrCCAtlas::CachedAtlasInfo> GrCCAtlas::refOrMakeCachedAtlasInfo(uint32_t contextUniqueID) {
-    if (!fCachedAtlasInfo) {
-        fCachedAtlasInfo = sk_make_sp<CachedAtlasInfo>(contextUniqueID);
+        fCachedAtlas = sk_make_sp<GrCCCachedAtlas>(fCoverageType, atlasUniqueKey, fTextureProxy);
     }
-    SkASSERT(fCachedAtlasInfo->fContextUniqueID == contextUniqueID);
-    return fCachedAtlasInfo;
+
+    SkASSERT(fCachedAtlas->coverageType() == fCoverageType);
+    SkASSERT(fCachedAtlas->getOnFlushProxy() == fTextureProxy.get());
+    return fCachedAtlas;
 }
 
 sk_sp<GrRenderTargetContext> GrCCAtlas::makeRenderTargetContext(
@@ -205,10 +203,6 @@
         return nullptr;
     }
 
-    if (fUniqueKey.isValid()) {
-        onFlushRP->assignUniqueKeyToProxy(fUniqueKey, fTextureProxy.get());
-    }
-
     SkIRect clearRect = SkIRect::MakeSize(fDrawBounds);
     rtc->clear(&clearRect, SK_PMColor4fTRANSPARENT,
                GrRenderTargetContext::CanClearFullscreen::kYes);
@@ -220,7 +214,7 @@
     if (fAtlases.empty() || !fAtlases.back().addRect(devIBounds, devToAtlasOffset)) {
         // The retired atlas is out of room and can't grow any bigger.
         retiredAtlas = !fAtlases.empty() ? &fAtlases.back() : nullptr;
-        fAtlases.emplace_back(fPixelConfig, fSpecs, *fCaps);
+        fAtlases.emplace_back(fCoverageType, fSpecs, *fCaps);
         SkASSERT(devIBounds.width() <= fSpecs.fMinWidth);
         SkASSERT(devIBounds.height() <= fSpecs.fMinHeight);
         SkAssertResult(fAtlases.back().addRect(devIBounds, devToAtlasOffset));
diff --git a/src/gpu/ccpr/GrCCAtlas.h b/src/gpu/ccpr/GrCCAtlas.h
index 4a762bc..03eed8c 100644
--- a/src/gpu/ccpr/GrCCAtlas.h
+++ b/src/gpu/ccpr/GrCCAtlas.h
@@ -15,6 +15,7 @@
 #include "SkRefCnt.h"
 #include "SkSize.h"
 
+class GrCCCachedAtlas;
 class GrOnFlushResourceProvider;
 class GrRenderTargetContext;
 class GrTextureProxy;
@@ -45,7 +46,12 @@
         void accountForSpace(int width, int height);
     };
 
-    GrCCAtlas(GrPixelConfig, const Specs&, const GrCaps&);
+    enum class CoverageType : bool {
+        kFP16_CoverageCount,
+        kA8_LiteralCoverage
+    };
+
+    GrCCAtlas(CoverageType, const Specs&, const GrCaps&);
     ~GrCCAtlas();
 
     GrTextureProxy* textureProxy() const { return fTextureProxy.get(); }
@@ -64,23 +70,7 @@
     void setStrokeBatchID(int id);
     int getStrokeBatchID() const { return fStrokeBatchID; }
 
-    // Manages a unique resource cache key that gets assigned to the atlas texture. The unique key
-    // does not get assigned to the texture proxy until it is instantiated.
-    const GrUniqueKey& getOrAssignUniqueKey(GrOnFlushResourceProvider*);
-    const GrUniqueKey& uniqueKey() const { return fUniqueKey; }
-
-    // An object for simple bookkeeping on the atlas texture once it has a unique key. In practice,
-    // we use it to track the percentage of the original atlas pixels that could still ever
-    // potentially be reused (i.e., those which still represent an extant path). When the percentage
-    // of useful pixels drops below 50%, the entire texture is purged from the resource cache.
-    struct CachedAtlasInfo : public GrNonAtomicRef<CachedAtlasInfo> {
-        CachedAtlasInfo(uint32_t contextUniqueID) : fContextUniqueID(contextUniqueID) {}
-        const uint32_t fContextUniqueID;
-        int fNumPathPixels = 0;
-        int fNumInvalidatedPathPixels = 0;
-        bool fIsPurgedFromResourceCache = false;
-    };
-    sk_sp<CachedAtlasInfo> refOrMakeCachedAtlasInfo(uint32_t contextUniqueID);
+    sk_sp<GrCCCachedAtlas> refOrMakeCachedAtlas(GrOnFlushResourceProvider*);
 
     // Instantiates our texture proxy for the atlas and returns a pre-cleared GrRenderTargetContext
     // that the caller may use to render the content. After this call, it is no longer valid to call
@@ -97,6 +87,7 @@
 
     bool internalPlaceRect(int w, int h, SkIPoint16* loc);
 
+    const CoverageType fCoverageType;
     const int fMaxTextureSize;
     int fWidth, fHeight;
     std::unique_ptr<Node> fTopNode;
@@ -105,11 +96,7 @@
     int fFillBatchID;
     int fStrokeBatchID;
 
-    // Not every atlas will have a unique key -- a mainline CCPR one won't if we don't stash any
-    // paths, and only the first atlas in the stack is eligible to be stashed.
-    GrUniqueKey fUniqueKey;
-
-    sk_sp<CachedAtlasInfo> fCachedAtlasInfo;
+    sk_sp<GrCCCachedAtlas> fCachedAtlas;
     sk_sp<GrTextureProxy> fTextureProxy;
     sk_sp<GrTexture> fBackingTexture;
 };
@@ -120,8 +107,10 @@
  */
 class GrCCAtlasStack {
 public:
-    GrCCAtlasStack(GrPixelConfig pixelConfig, const GrCCAtlas::Specs& specs, const GrCaps* caps)
-            : fPixelConfig(pixelConfig), fSpecs(specs), fCaps(caps) {}
+    using CoverageType = GrCCAtlas::CoverageType;
+
+    GrCCAtlasStack(CoverageType coverageType, const GrCCAtlas::Specs& specs, const GrCaps* caps)
+            : fCoverageType(coverageType), fSpecs(specs), fCaps(caps) {}
 
     bool empty() const { return fAtlases.empty(); }
     const GrCCAtlas& front() const { SkASSERT(!this->empty()); return fAtlases.front(); }
@@ -147,7 +136,7 @@
     GrCCAtlas* addRect(const SkIRect& devIBounds, SkIVector* devToAtlasOffset);
 
 private:
-    const GrPixelConfig fPixelConfig;
+    const CoverageType fCoverageType;
     const GrCCAtlas::Specs fSpecs;
     const GrCaps* const fCaps;
     GrSTAllocator<4, GrCCAtlas> fAtlases;
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index c1384fe..ba16f58 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -6,6 +6,7 @@
  */
 
 #include "GrCCDrawPathsOp.h"
+
 #include "GrContext.h"
 #include "GrContextPriv.h"
 #include "GrMemoryPool.h"
@@ -157,13 +158,6 @@
 #endif
 }
 
-GrCCDrawPathsOp::SingleDraw::~SingleDraw() {
-    if (fCacheEntry) {
-        // All currFlushAtlas references must be reset back to null before the flush is finished.
-        fCacheEntry->setCurrFlushAtlas(nullptr);
-    }
-}
-
 GrDrawOp::RequiresDstTexture GrCCDrawPathsOp::finalize(const GrCaps& caps,
                                                        const GrAppliedClip* clip) {
     SkASSERT(1 == fNumDraws);  // There should only be one single path draw in this Op right now.
@@ -233,10 +227,10 @@
 
 void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
                                          GrOnFlushResourceProvider* onFlushRP,
-                                         const GrUniqueKey& stashedAtlasKey,
                                          GrCCPerFlushResourceSpecs* specs) {
     using CreateIfAbsent = GrCCPathCache::CreateIfAbsent;
     using MaskTransform = GrCCPathCache::MaskTransform;
+    using CoverageType = GrCCAtlas::CoverageType;
 
     for (SingleDraw& draw : fDraws) {
         SkPath path;
@@ -247,41 +241,32 @@
         if (pathCache) {
             MaskTransform m(draw.fMatrix, &draw.fCachedMaskShift);
             bool canStashPathMask = draw.fMaskVisibility >= Visibility::kMostlyComplete;
-            draw.fCacheEntry = pathCache->find(draw.fShape, m, CreateIfAbsent(canStashPathMask));
+            draw.fCacheEntry =
+                    pathCache->find(onFlushRP, draw.fShape, m, CreateIfAbsent(canStashPathMask));
         }
 
-        if (auto cacheEntry = draw.fCacheEntry.get()) {
-            SkASSERT(!cacheEntry->currFlushAtlas());  // Shouldn't be set until setupResources().
-
-            if (cacheEntry->atlasKey().isValid()) {
-                // Does the path already exist in a cached atlas?
-                if (cacheEntry->hasCachedAtlas() &&
-                    (draw.fCachedAtlasProxy = onFlushRP->findOrCreateProxyByUniqueKey(
-                                                     cacheEntry->atlasKey(),
-                                                     GrCCAtlas::kTextureOrigin))) {
+        if (draw.fCacheEntry) {
+            if (const GrCCCachedAtlas* cachedAtlas = draw.fCacheEntry->cachedAtlas()) {
+                SkASSERT(cachedAtlas->getOnFlushProxy());
+                if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
                     ++specs->fNumCachedPaths;
-                    continue;
-                }
-
-                // Does the path exist in the atlas that we stashed away from last flush? If so we
-                // can copy it into a new 8-bit atlas and keep it in the resource cache.
-                if (stashedAtlasKey.isValid() && stashedAtlasKey == cacheEntry->atlasKey()) {
-                    SkASSERT(!cacheEntry->hasCachedAtlas());
+                } else {
+                    // Suggest that this path be copied to a literal coverage atlas, to save memory.
+                    // (The client may decline this copy via DoCopiesToA8Coverage::kNo.)
                     int idx = (draw.fShape.style().strokeRec().isFillStyle())
                             ? GrCCPerFlushResourceSpecs::kFillIdx
                             : GrCCPerFlushResourceSpecs::kStrokeIdx;
                     ++specs->fNumCopiedPaths[idx];
                     specs->fCopyPathStats[idx].statPath(path);
-                    specs->fCopyAtlasSpecs.accountForSpace(cacheEntry->width(),
-                                                           cacheEntry->height());
-                    continue;
+                    specs->fCopyAtlasSpecs.accountForSpace(
+                            draw.fCacheEntry->width(), draw.fCacheEntry->height());
+                    draw.fDoCopyToA8Coverage = true;
                 }
-
-                // Whatever atlas the path used to reside in, it no longer exists.
-                cacheEntry->resetAtlasKeyAndInfo();
+                continue;
             }
 
-            if (Visibility::kMostlyComplete == draw.fMaskVisibility && cacheEntry->hitCount() > 1) {
+            if (Visibility::kMostlyComplete == draw.fMaskVisibility &&
+                    draw.fCacheEntry->hitCount() > 1) {
                 int shapeSize = SkTMax(draw.fShapeConservativeIBounds.height(),
                                        draw.fShapeConservativeIBounds.width());
                 if (shapeSize <= onFlushRP->caps()->maxRenderTargetSize()) {
@@ -303,8 +288,9 @@
     }
 }
 
-void GrCCDrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,
-                                     GrCCPerFlushResources* resources, DoCopiesToCache doCopies) {
+void GrCCDrawPathsOp::setupResources(
+        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
+        GrCCPerFlushResources* resources, DoCopiesToA8Coverage doCopies) {
     using DoEvenOddFill = GrCCPathProcessor::DoEvenOddFill;
     SkASSERT(fNumDraws > 0);
     SkASSERT(-1 == fBaseInstance);
@@ -321,51 +307,29 @@
 
         if (auto cacheEntry = draw.fCacheEntry.get()) {
             // Does the path already exist in a cached atlas texture?
-            if (auto proxy = draw.fCachedAtlasProxy.get()) {
-                SkASSERT(!cacheEntry->currFlushAtlas());
-                this->recordInstance(proxy, resources->nextPathInstanceIdx());
+            if (cacheEntry->cachedAtlas()) {
+                SkASSERT(cacheEntry->cachedAtlas()->getOnFlushProxy());
+                if (DoCopiesToA8Coverage::kYes == doCopies && draw.fDoCopyToA8Coverage) {
+                    resources->upgradeEntryToLiteralCoverageAtlas(pathCache, onFlushRP, cacheEntry,
+                                                                  doEvenOddFill);
+                    SkASSERT(cacheEntry->cachedAtlas());
+                    SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage
+                                     == cacheEntry->cachedAtlas()->coverageType());
+                    SkASSERT(cacheEntry->cachedAtlas()->getOnFlushProxy());
+                }
+                this->recordInstance(cacheEntry->cachedAtlas()->getOnFlushProxy(),
+                                     resources->nextPathInstanceIdx());
                 // TODO4F: Preserve float colors
                 resources->appendDrawPathInstance().set(*cacheEntry, draw.fCachedMaskShift,
                                                         draw.fColor.toBytes_RGBA());
                 continue;
             }
-
-            // Have we already encountered this path during the flush? (i.e. was the same SkPath
-            // drawn more than once during the same flush, with a compatible matrix?)
-            if (auto atlas = cacheEntry->currFlushAtlas()) {
-                this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
-                // TODO4F: Preserve float colors
-                resources->appendDrawPathInstance().set(
-                        *cacheEntry, draw.fCachedMaskShift, draw.fColor.toBytes_RGBA(),
-                        cacheEntry->hasCachedAtlas() ? DoEvenOddFill::kNo : doEvenOddFill);
-                continue;
-            }
-
-            // If the cache entry still has a valid atlas key at this point, it means the path
-            // exists in the atlas that we stashed away from last flush. Copy it into a permanent
-            // 8-bit atlas in the resource cache.
-            if (DoCopiesToCache::kYes == doCopies && cacheEntry->atlasKey().isValid()) {
-                SkIVector newOffset;
-                GrCCAtlas* atlas =
-                        resources->copyPathToCachedAtlas(*cacheEntry, doEvenOddFill, &newOffset);
-                cacheEntry->updateToCachedAtlas(
-                        atlas->getOrAssignUniqueKey(onFlushRP), newOffset,
-                        atlas->refOrMakeCachedAtlasInfo(onFlushRP->contextUniqueID()));
-                this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
-                // TODO4F: Preserve float colors
-                resources->appendDrawPathInstance().set(*cacheEntry, draw.fCachedMaskShift,
-                                                        draw.fColor.toBytes_RGBA());
-                // Remember this atlas in case we encounter the path again during the same flush.
-                cacheEntry->setCurrFlushAtlas(atlas);
-                continue;
-            }
         }
 
-        // Render the raw path into a coverage count atlas. renderPathInAtlas() gives us two tight
+        // Render the raw path into a coverage count atlas. renderShapeInAtlas() gives us two tight
         // bounding boxes: One in device space, as well as a second one rotated an additional 45
         // degrees. The path vertex shader uses these two bounding boxes to generate an octagon that
         // circumscribes the path.
-        SkASSERT(!draw.fCachedAtlasProxy);
         SkRect devBounds, devBounds45;
         SkIRect devIBounds;
         SkIVector devToAtlasOffset;
@@ -380,7 +344,7 @@
             // If we have a spot in the path cache, try to make a note of where this mask is so we
             // can reuse it in the future.
             if (auto cacheEntry = draw.fCacheEntry.get()) {
-                SkASSERT(!cacheEntry->hasCachedAtlas());
+                SkASSERT(!cacheEntry->cachedAtlas());
 
                 if (Visibility::kComplete != draw.fMaskVisibility || cacheEntry->hitCount() <= 1) {
                     // Don't cache a path mask unless it's completely visible with a hit count > 1.
@@ -390,19 +354,9 @@
                     continue;
                 }
 
-                if (resources->nextAtlasToStash() != atlas) {
-                    // This mask does not belong to the atlas that will be stashed for next flush.
-                    continue;
-                }
-
-                const GrUniqueKey& atlasKey =
-                        resources->nextAtlasToStash()->getOrAssignUniqueKey(onFlushRP);
-                cacheEntry->initAsStashedAtlas(atlasKey, devToAtlasOffset, devBounds, devBounds45,
-                                               devIBounds, draw.fCachedMaskShift);
-                // Remember this atlas in case we encounter the path again during the same flush.
-                cacheEntry->setCurrFlushAtlas(atlas);
+                cacheEntry->setCoverageCountAtlas(onFlushRP, atlas, devToAtlasOffset, devBounds,
+                                                  devBounds45, devIBounds, draw.fCachedMaskShift);
             }
-            continue;
         }
     }
 
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
index 9eacbb0..76cde50 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -11,14 +11,13 @@
 #include "GrShape.h"
 #include "SkTInternalLList.h"
 #include "ccpr/GrCCSTLList.h"
+#include "ccpr/GrCCPathCache.h"
 #include "ops/GrDrawOp.h"
 
 struct GrCCPerFlushResourceSpecs;
 struct GrCCPerOpListPaths;
 class GrCCAtlas;
 class GrOnFlushResourceProvider;
-class GrCCPathCache;
-class GrCCPathCacheEntry;
 class GrCCPerFlushResources;
 
 /**
@@ -45,26 +44,24 @@
     void addToOwningPerOpListPaths(sk_sp<GrCCPerOpListPaths> owningPerOpListPaths);
 
     // Makes decisions about how to draw each path (cached, copied, rendered, etc.), and
-    // increments/fills out the corresponding GrCCPerFlushResourceSpecs. 'stashedAtlasKey', if
-    // valid, references the mainline coverage count atlas from the previous flush. Paths found in
-    // this atlas will be copied to more permanent atlases in the resource cache.
-    void accountForOwnPaths(GrCCPathCache*, GrOnFlushResourceProvider*,
-                            const GrUniqueKey& stashedAtlasKey, GrCCPerFlushResourceSpecs*);
+    // increments/fills out the corresponding GrCCPerFlushResourceSpecs.
+    void accountForOwnPaths(GrCCPathCache*, GrOnFlushResourceProvider*, GrCCPerFlushResourceSpecs*);
 
-    // Allows the caller to decide whether to copy paths out of the stashed atlas and into the
-    // resource cache, or to just re-render the paths from scratch. If there aren't many copies or
-    // the copies would only fill a small atlas, it's probably best to just re-render.
-    enum class DoCopiesToCache : bool {
+    // Allows the caller to decide whether to actually do the suggested copies from cached 16-bit
+    // coverage count atlases into 8-bit literal coverage atlases. Purely to save space.
+    enum class DoCopiesToA8Coverage : bool {
         kNo = false,
         kYes = true
     };
 
     // Allocates the GPU resources indicated by accountForOwnPaths(), in preparation for drawing. If
-    // DoCopiesToCache is kNo, the paths slated for copy will instead be re-rendered from scratch.
+    // DoCopiesToA8Coverage is kNo, the paths slated for copy will instead be left in their 16-bit
+    // coverage count atlases.
     //
-    // NOTE: If using DoCopiesToCache::kNo, it is the caller's responsibility to call
-    //       convertCopiesToRenders() on the GrCCPerFlushResourceSpecs.
-    void setupResources(GrOnFlushResourceProvider*, GrCCPerFlushResources*, DoCopiesToCache);
+    // NOTE: If using DoCopiesToA8Coverage::kNo, it is the caller's responsibility to have called
+    // cancelCopies() on the GrCCPerFlushResourceSpecs, prior to making this call.
+    void setupResources(GrCCPathCache*, GrOnFlushResourceProvider*, GrCCPerFlushResources*,
+                        DoCopiesToA8Coverage);
 
     void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
 
@@ -94,7 +91,6 @@
         SingleDraw(const SkMatrix&, const GrShape&, float strokeDevWidth,
                    const SkIRect& shapeConservativeIBounds, const SkIRect& maskDevIBounds,
                    Visibility maskVisibility, const SkPMColor4f&);
-        ~SingleDraw();
 
         SkMatrix fMatrix;
         GrShape fShape;
@@ -104,9 +100,9 @@
         Visibility fMaskVisibility;
         SkPMColor4f fColor;
 
-        sk_sp<GrCCPathCacheEntry> fCacheEntry;
-        sk_sp<GrTextureProxy> fCachedAtlasProxy;
+        GrCCPathCache::OnFlushEntryRef fCacheEntry;
         SkIVector fCachedMaskShift;
+        bool fDoCopyToA8Coverage = false;
 
         SingleDraw* fNext = nullptr;
     };
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index a5b9a10..7a49e3e 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -7,7 +7,8 @@
 
 #include "GrCCPathCache.h"
 
-#include "GrShape.h"
+#include "GrOnFlushResourceProvider.h"
+#include "GrProxyProvider.h"
 #include "SkNx.h"
 
 static constexpr int kMaxKeyDataCountU32 = 256;  // 1kB of uint32_t's.
@@ -84,66 +85,33 @@
     return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
 }
 
-inline bool GrCCPathCache::Key::operator==(const GrCCPathCache::Key& that) const {
-    return fDataSizeInBytes == that.fDataSizeInBytes &&
-           !memcmp(this->data(), that.data(), fDataSizeInBytes);
-}
-
 void GrCCPathCache::Key::onChange() {
     // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
     SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
 }
 
-inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
-        const GrCCPathCache::HashNode& node) {
-    return *node.entry()->fCacheKey;
-}
-
-inline uint32_t GrCCPathCache::HashNode::Hash(const Key& key) {
-    return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
-}
-
-inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
-                                         const MaskTransform& m, const GrShape& shape)
-        : fPathCache(pathCache)
-        , fEntry(new GrCCPathCacheEntry(key, m)) {
-    SkASSERT(shape.hasUnstyledKey());
-    shape.addGenIDChangeListener(std::move(key));
-}
-
-inline GrCCPathCache::HashNode::~HashNode() {
-    this->willExitHashTable();
-}
-
-inline GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
-    this->willExitHashTable();
-    fPathCache = node.fPathCache;
-    fEntry = std::move(node.fEntry);
-    SkASSERT(!node.fEntry);
-    return *this;
-}
-
-inline void GrCCPathCache::HashNode::willExitHashTable() {
-    if (!fEntry) {
-        return;  // We were moved.
-    }
-
-    SkASSERT(fPathCache);
-    SkASSERT(fPathCache->fLRU.isInList(fEntry.get()));
-
-    fEntry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
-    fPathCache->fLRU.remove(fEntry.get());
-}
-
-
-GrCCPathCache::GrCCPathCache()
-        : fInvalidatedKeysInbox(next_path_cache_id())
+GrCCPathCache::GrCCPathCache(uint32_t contextUniqueID)
+        : fContextUniqueID(contextUniqueID)
+        , fInvalidatedKeysInbox(next_path_cache_id())
         , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
 }
 
 GrCCPathCache::~GrCCPathCache() {
-    fHashTable.reset();  // Must be cleared first; ~HashNode calls fLRU.remove() on us.
-    SkASSERT(fLRU.isEmpty());  // Ensure the hash table and LRU list were coherent.
+    while (!fLRU.isEmpty()) {
+        this->evict(*fLRU.tail()->fCacheKey, fLRU.tail());
+    }
+    SkASSERT(0 == fHashTable.count());  // Ensure the hash table and LRU list were coherent.
+
+    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
+    // We just purge via message bus since we don't have any access to the resource cache right now.
+    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
+        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
+                GrUniqueKeyInvalidatedMessage(proxy->getUniqueKey(), fContextUniqueID));
+    }
+    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
+        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
+                GrUniqueKeyInvalidatedMessage(key, fContextUniqueID));
+    }
 }
 
 namespace {
@@ -190,15 +158,16 @@
 
 }
 
-sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
-                                              CreateIfAbsent createIfAbsent) {
+GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(GrOnFlushResourceProvider* onFlushRP,
+                                                   const GrShape& shape, const MaskTransform& m,
+                                                   CreateIfAbsent createIfAbsent) {
     if (!shape.hasUnstyledKey()) {
-        return nullptr;
+        return OnFlushEntryRef();
     }
 
     WriteKeyHelper writeKeyHelper(shape);
     if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
-        return nullptr;
+        return OnFlushEntryRef();
     }
 
     SkASSERT(fScratchKey->unique());
@@ -209,14 +178,15 @@
     if (HashNode* node = fHashTable.find(*fScratchKey)) {
         entry = node->entry();
         SkASSERT(fLRU.isInList(entry));
+
         if (!fuzzy_equals(m, entry->fMaskTransform)) {
             // The path was reused with an incompatible matrix.
             if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
                 // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
+                SkASSERT(0 == entry->fOnFlushRefCnt);  // Because we are unique.
                 entry->fMaskTransform = m;
                 entry->fHitCount = 0;
-                entry->invalidateAtlas();
-                SkASSERT(!entry->fCurrFlushAtlas);  // Should be null because 'entry' is unique.
+                entry->releaseCachedAtlas(this);
             } else {
                 this->evict(*fScratchKey);
                 entry = nullptr;
@@ -226,7 +196,7 @@
 
     if (!entry) {
         if (CreateIfAbsent::kNo == createIfAbsent) {
-            return nullptr;
+            return OnFlushEntryRef();
         }
         if (fHashTable.count() >= kMaxCacheCount) {
             SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
@@ -250,20 +220,54 @@
     SkASSERT(node && node->entry() == entry);
     fLRU.addToHead(entry);
 
-    entry->fTimestamp = this->quickPerFlushTimestamp();
-    ++entry->fHitCount;
-    return sk_ref_sp(entry);
+    if (0 == entry->fOnFlushRefCnt) {
+        // Only update the time stamp and hit count if we haven't seen this entry yet during the
+        // current flush.
+        entry->fTimestamp = this->quickPerFlushTimestamp();
+        ++entry->fHitCount;
+
+        if (entry->fCachedAtlas) {
+            SkASSERT(SkToBool(entry->fCachedAtlas->peekOnFlushRefCnt())
+                             == SkToBool(entry->fCachedAtlas->getOnFlushProxy()));
+            if (!entry->fCachedAtlas->getOnFlushProxy()) {
+                entry->fCachedAtlas->setOnFlushProxy(
+                    onFlushRP->findOrCreateProxyByUniqueKey(entry->fCachedAtlas->textureKey(),
+                                                            GrCCAtlas::kTextureOrigin));
+            }
+            if (!entry->fCachedAtlas->getOnFlushProxy()) {
+                // Our atlas's backing texture got purged from the GrResourceCache. Release the
+                // cached atlas.
+                entry->releaseCachedAtlas(this);
+            }
+        }
+    }
+    SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
+    return OnFlushEntryRef::OnFlushRef(entry);
 }
 
-void GrCCPathCache::doPostFlushProcessing() {
-    this->purgeInvalidatedKeys();
+void GrCCPathCache::evict(const GrCCPathCache::Key& key, GrCCPathCacheEntry* entry) {
+    if (!entry) {
+        HashNode* node = fHashTable.find(key);
+        SkASSERT(node);
+        entry = node->entry();
+    }
+    SkASSERT(*entry->fCacheKey == key);
+    entry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
+    entry->releaseCachedAtlas(this);
+    fLRU.remove(entry);
+    fHashTable.remove(key);
+}
+
+void GrCCPathCache::doPreFlushProcessing() {
+    this->evictInvalidatedCacheKeys();
 
     // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
     fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
 }
 
-void GrCCPathCache::purgeEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime) {
-    this->purgeInvalidatedKeys();
+void GrCCPathCache::purgeEntriesOlderThan(GrProxyProvider* proxyProvider,
+                                          const GrStdSteadyClock::time_point& purgeTime) {
+    this->evictInvalidatedCacheKeys();
 
 #ifdef SK_DEBUG
     auto lastTimestamp = (fLRU.isEmpty())
@@ -271,7 +275,7 @@
             : fLRU.tail()->fTimestamp;
 #endif
 
-    // Drop every cache entry whose timestamp is older than purgeTime.
+    // Evict every entry from our local path cache whose timestamp is older than purgeTime.
     while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
 #ifdef SK_DEBUG
         // Verify that fLRU is sorted by timestamp.
@@ -281,9 +285,37 @@
 #endif
         this->evict(*fLRU.tail()->fCacheKey);
     }
+
+    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
+    this->purgeInvalidatedAtlasTextures(proxyProvider);
 }
 
-void GrCCPathCache::purgeInvalidatedKeys() {
+void GrCCPathCache::purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider* onFlushRP) {
+    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
+        onFlushRP->removeUniqueKeyFromProxy(proxy.get());
+    }
+    fInvalidatedProxies.reset();
+
+    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
+        onFlushRP->processInvalidUniqueKey(key);
+    }
+    fInvalidatedProxyUniqueKeys.reset();
+}
+
+void GrCCPathCache::purgeInvalidatedAtlasTextures(GrProxyProvider* proxyProvider) {
+    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
+        proxyProvider->removeUniqueKeyFromProxy(proxy.get());
+    }
+    fInvalidatedProxies.reset();
+
+    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
+        proxyProvider->processInvalidUniqueKey(key, nullptr,
+                                               GrProxyProvider::InvalidateGPUResource::kYes);
+    }
+    fInvalidatedProxyUniqueKeys.reset();
+}
+
+void GrCCPathCache::evictInvalidatedCacheKeys() {
     SkTArray<sk_sp<Key>> invalidatedKeys;
     fInvalidatedKeysInbox.poll(&invalidatedKeys);
     for (const sk_sp<Key>& key : invalidatedKeys) {
@@ -294,17 +326,41 @@
     }
 }
 
+GrCCPathCache::OnFlushEntryRef
+GrCCPathCache::OnFlushEntryRef::OnFlushRef(GrCCPathCacheEntry* entry) {
+    entry->ref();
+    ++entry->fOnFlushRefCnt;
+    if (entry->fCachedAtlas) {
+        entry->fCachedAtlas->incrOnFlushRefCnt();
+    }
+    return OnFlushEntryRef(entry);
+}
 
-void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
-                                            const SkIVector& atlasOffset, const SkRect& devBounds,
-                                            const SkRect& devBounds45, const SkIRect& devIBounds,
-                                            const SkIVector& maskShift) {
-    SkASSERT(atlasKey.isValid());
-    SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.
+GrCCPathCache::OnFlushEntryRef::~OnFlushEntryRef() {
+    if (!fEntry) {
+        return;
+    }
+    --fEntry->fOnFlushRefCnt;
+    SkASSERT(fEntry->fOnFlushRefCnt >= 0);
+    if (fEntry->fCachedAtlas) {
+        fEntry->fCachedAtlas->decrOnFlushRefCnt();
+    }
+    fEntry->unref();
+}
 
-    fAtlasKey = atlasKey;
+
+void GrCCPathCacheEntry::setCoverageCountAtlas(
+        GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas, const SkIVector& atlasOffset,
+        const SkRect& devBounds, const SkRect& devBounds45, const SkIRect& devIBounds,
+        const SkIVector& maskShift) {
+    SkASSERT(fOnFlushRefCnt > 0);
+    SkASSERT(!fCachedAtlas);  // Otherwise we would need to call releaseCachedAtlas().
+
+    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
+    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
+    fCachedAtlas->addPathPixels(devIBounds.height() * devIBounds.width());
+
     fAtlasOffset = atlasOffset + maskShift;
-    SkASSERT(!fCachedAtlasInfo);  // Otherwise they should have reused the cached atlas instead.
 
     float dx = (float)maskShift.fX, dy = (float)maskShift.fY;
     fDevBounds = devBounds.makeOffset(-dx, -dy);
@@ -312,34 +368,65 @@
     fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
 }
 
-void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
-                                             const SkIVector& newAtlasOffset,
-                                             sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
-    SkASSERT(atlasKey.isValid());
-    SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.
+GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::upgradeToLiteralCoverageAtlas(
+        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas,
+        const SkIVector& newAtlasOffset) {
+    SkASSERT(fOnFlushRefCnt > 0);
+    SkASSERT(fCachedAtlas);
+    SkASSERT(GrCCAtlas::CoverageType::kFP16_CoverageCount == fCachedAtlas->coverageType());
 
-    fAtlasKey = atlasKey;
+    ReleaseAtlasResult releaseAtlasResult = this->releaseCachedAtlas(pathCache);
+
+    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
+    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
+    fCachedAtlas->addPathPixels(this->height() * this->width());
+
     fAtlasOffset = newAtlasOffset;
-
-    SkASSERT(!fCachedAtlasInfo);  // Otherwise we need to invalidate our pixels in the old info.
-    fCachedAtlasInfo = std::move(info);
-    fCachedAtlasInfo->fNumPathPixels += this->height() * this->width();
+    return releaseAtlasResult;
 }
 
-void GrCCPathCacheEntry::invalidateAtlas() {
-    if (fCachedAtlasInfo) {
-        // Mark our own pixels invalid in the cached atlas texture.
-        fCachedAtlasInfo->fNumInvalidatedPathPixels += this->height() * this->width();
-        if (!fCachedAtlasInfo->fIsPurgedFromResourceCache &&
-            fCachedAtlasInfo->fNumInvalidatedPathPixels >= fCachedAtlasInfo->fNumPathPixels / 2) {
-            // Too many invalidated pixels: purge the atlas texture from the resource cache.
-            // The GrContext and CCPR path cache both share the same unique ID.
-            SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
-                    GrUniqueKeyInvalidatedMessage(fAtlasKey, fCachedAtlasInfo->fContextUniqueID));
-            fCachedAtlasInfo->fIsPurgedFromResourceCache = true;
+GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::releaseCachedAtlas(
+        GrCCPathCache* pathCache) {
+    ReleaseAtlasResult result = ReleaseAtlasResult::kNone;
+    if (fCachedAtlas) {
+        result = fCachedAtlas->invalidatePathPixels(pathCache, this->height() * this->width());
+        if (fOnFlushRefCnt) {
+            SkASSERT(fOnFlushRefCnt > 0);
+            fCachedAtlas->decrOnFlushRefCnt(fOnFlushRefCnt);
         }
+        fCachedAtlas = nullptr;
     }
+    return result;
+}
 
-    fAtlasKey.reset();
-    fCachedAtlasInfo = nullptr;
+GrCCPathCacheEntry::ReleaseAtlasResult GrCCCachedAtlas::invalidatePathPixels(
+        GrCCPathCache* pathCache, int numPixels) {
+    // Mark the pixels invalid in the cached atlas texture.
+    fNumInvalidatedPathPixels += numPixels;
+    SkASSERT(fNumInvalidatedPathPixels <= fNumPathPixels);
+    if (!fIsInvalidatedFromResourceCache && fNumInvalidatedPathPixels >= fNumPathPixels / 2) {
+        // Too many invalidated pixels: purge the atlas texture from the resource cache.
+        if (fOnFlushProxy) {
+            // Don't clear (or std::move) fOnFlushProxy. Other path cache entries might still have a
+            // reference on this atlas and expect to use our proxy during the current flush.
+            // fOnFlushProxy will be cleared once fOnFlushRefCnt decrements to zero.
+            pathCache->fInvalidatedProxies.push_back(fOnFlushProxy);
+        } else {
+            pathCache->fInvalidatedProxyUniqueKeys.push_back(fTextureKey);
+        }
+        fIsInvalidatedFromResourceCache = true;
+        return ReleaseAtlasResult::kDidInvalidateFromCache;
+    }
+    return ReleaseAtlasResult::kNone;
+}
+
+void GrCCCachedAtlas::decrOnFlushRefCnt(int count) const {
+    SkASSERT(count > 0);
+    fOnFlushRefCnt -= count;
+    SkASSERT(fOnFlushRefCnt >= 0);
+    if (0 == fOnFlushRefCnt) {
+        // Don't hold the actual proxy past the end of the current flush.
+        SkASSERT(fOnFlushProxy);
+        fOnFlushProxy = nullptr;
+    }
 }
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index 3b34fe2..9aef2e8 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -8,6 +8,7 @@
 #ifndef GrCCPathCache_DEFINED
 #define GrCCPathCache_DEFINED
 
+#include "GrShape.h"
 #include "SkExchange.h"
 #include "SkTHash.h"
 #include "SkTInternalLList.h"
@@ -24,7 +25,7 @@
  */
 class GrCCPathCache {
 public:
-    GrCCPathCache();
+    GrCCPathCache(uint32_t contextUniqueID);
     ~GrCCPathCache();
 
     class Key : public SkPathRef::GenIDChangeListener {
@@ -43,7 +44,10 @@
         }
         uint32_t* data();
 
-        bool operator==(const Key&) const;
+        bool operator==(const Key& that) const {
+            return fDataSizeInBytes == that.fDataSizeInBytes &&
+                   !memcmp(this->data(), that.data(), fDataSizeInBytes);
+        }
 
         // Called when our corresponding path is modified or deleted. Not threadsafe.
         void onChange() override;
@@ -76,6 +80,25 @@
 #endif
     };
 
+    // Represents a ref on a GrCCPathCacheEntry that should only be used during the current flush.
+    class OnFlushEntryRef : SkNoncopyable {
+    public:
+        static OnFlushEntryRef OnFlushRef(GrCCPathCacheEntry*);
+        OnFlushEntryRef() = default;
+        OnFlushEntryRef(OnFlushEntryRef&& ref) : fEntry(skstd::exchange(ref.fEntry, nullptr)) {}
+        ~OnFlushEntryRef();
+
+        GrCCPathCacheEntry* get() const { return fEntry; }
+        GrCCPathCacheEntry* operator->() const { return fEntry; }
+        GrCCPathCacheEntry& operator*() const { return *fEntry; }
+        explicit operator bool() const { return fEntry; }
+        void operator=(OnFlushEntryRef&& ref) { fEntry = skstd::exchange(ref.fEntry, nullptr); }
+
+    private:
+        OnFlushEntryRef(GrCCPathCacheEntry* entry) : fEntry(entry) {}
+        GrCCPathCacheEntry* fEntry = nullptr;
+    };
+
     enum class CreateIfAbsent : bool {
         kNo = false,
         kYes = true
@@ -83,11 +106,19 @@
 
     // Finds an entry in the cache. Shapes are only given one entry, so any time they are accessed
     // with a different MaskTransform, the old entry gets evicted.
-    sk_sp<GrCCPathCacheEntry> find(const GrShape&, const MaskTransform&,
-                                   CreateIfAbsent = CreateIfAbsent::kNo);
+    OnFlushEntryRef find(GrOnFlushResourceProvider*, const GrShape&, const MaskTransform&,
+                         CreateIfAbsent = CreateIfAbsent::kNo);
 
-    void doPostFlushProcessing();
-    void purgeEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime);
+    void doPreFlushProcessing();
+
+    void purgeEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point& purgeTime);
+
+    // As we evict entries from our local path cache, we accumulate a list of invalidated atlas
+    // textures. This call purges the invalidated atlas textures from the mainline GrResourceCache.
+    // This call is available with two different "provider" objects, to accommodate whatever might
+    // be available at the callsite.
+    void purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider*);
+    void purgeInvalidatedAtlasTextures(GrProxyProvider*);
 
 private:
     // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
@@ -97,7 +128,9 @@
     class HashNode : SkNoncopyable {
     public:
         static const Key& GetKey(const HashNode&);
-        static uint32_t Hash(const Key&);
+        inline static uint32_t Hash(const Key& key) {
+            return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
+        }
 
         HashNode() = default;
         HashNode(GrCCPathCache*, sk_sp<Key>, const MaskTransform&, const GrShape&);
@@ -108,13 +141,11 @@
 
         ~HashNode();
 
-        HashNode& operator=(HashNode&& node);
+        void operator=(HashNode&& node);
 
         GrCCPathCacheEntry* entry() const { return fEntry.get(); }
 
     private:
-        void willExitHashTable();
-
         GrCCPathCache* fPathCache = nullptr;
         sk_sp<GrCCPathCacheEntry> fEntry;
     };
@@ -127,13 +158,15 @@
         return fPerFlushTimestamp;
     }
 
-    void evict(const GrCCPathCache::Key& key) {
-        fHashTable.remove(key);  // HashNode::willExitHashTable() takes care of the rest.
-    }
+    void evict(const GrCCPathCache::Key&, GrCCPathCacheEntry* = nullptr);
 
-    void purgeInvalidatedKeys();
+    // Evicts all the cache entries whose keys have been queued up in fInvalidatedKeysInbox via
+    // SkPath listeners.
+    void evictInvalidatedCacheKeys();
 
-    SkTHashTable<HashNode, const GrCCPathCache::Key&> fHashTable;
+    const uint32_t fContextUniqueID;
+
+    SkTHashTable<HashNode, const Key&> fHashTable;
     SkTInternalLList<GrCCPathCacheEntry> fLRU;
     SkMessageBus<sk_sp<Key>>::Inbox fInvalidatedKeysInbox;
     sk_sp<Key> fScratchKey;  // Reused for creating a temporary key in the find() method.
@@ -141,6 +174,18 @@
     // We only read the clock once per flush, and cache it in this variable. This prevents us from
     // excessive clock reads for cache timestamps that might degrade performance.
     GrStdSteadyClock::time_point fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
+
+    // As we evict entries from our local path cache, we accumulate lists of invalidated atlas
+    // textures in these two members. We hold these until we purge them from the GrResourceCache
+    // (e.g. via purgeInvalidatedAtlasTextures().)
+    SkSTArray<4, sk_sp<GrTextureProxy>> fInvalidatedProxies;
+    SkSTArray<4, GrUniqueKey> fInvalidatedProxyUniqueKeys;
+
+    friend class GrCCCachedAtlas;  // To append to fInvalidatedProxies, fInvalidatedProxyUniqueKeys.
+
+public:
+    const SkTHashTable<HashNode, const Key&>& testingOnly_getHashTable() const;
+    const SkTInternalLList<GrCCPathCacheEntry>& testingOnly_getLRU() const;
 };
 
 /**
@@ -152,10 +197,12 @@
     SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCPathCacheEntry);
 
     ~GrCCPathCacheEntry() {
-        SkASSERT(!fCurrFlushAtlas);  // Client is required to reset fCurrFlushAtlas back to null.
-        this->invalidateAtlas();
+        SkASSERT(0 == fOnFlushRefCnt);
+        SkASSERT(!fCachedAtlas);  // Should have called GrCCPathCache::evict().
     }
 
+    const GrCCPathCache::Key& cacheKey() const { SkASSERT(fCacheKey); return *fCacheKey; }
+
     // The number of times this specific entry (path + matrix combination) has been pulled from
     // the path cache. As long as the caller does exactly one lookup per draw, this translates to
     // the number of times the path has been drawn with a compatible matrix.
@@ -164,44 +211,28 @@
     // GrCCPathCache::find(.., CreateIfAbsent::kYes), its hit count will be 1.
     int hitCount() const { return fHitCount; }
 
-    // Does this entry reference a permanent, 8-bit atlas that resides in the resource cache?
-    // (i.e. not a temporarily-stashed, fp16 coverage count atlas.)
-    bool hasCachedAtlas() const { return SkToBool(fCachedAtlasInfo); }
+    const GrCCCachedAtlas* cachedAtlas() const { return fCachedAtlas.get(); }
 
     const SkIRect& devIBounds() const { return fDevIBounds; }
     int width() const { return fDevIBounds.width(); }
     int height() const { return fDevIBounds.height(); }
 
+    enum class ReleaseAtlasResult : bool {
+        kNone,
+        kDidInvalidateFromCache
+    };
+
     // Called once our path has been rendered into the mainline CCPR (fp16, coverage count) atlas.
     // The caller will stash this atlas texture away after drawing, and during the next flush,
     // recover it and attempt to copy any paths that got reused into permanent 8-bit atlases.
-    void initAsStashedAtlas(const GrUniqueKey& atlasKey, const SkIVector& atlasOffset,
-                            const SkRect& devBounds, const SkRect& devBounds45,
-                            const SkIRect& devIBounds, const SkIVector& maskShift);
+    void setCoverageCountAtlas(GrOnFlushResourceProvider*, GrCCAtlas*, const SkIVector& atlasOffset,
+                               const SkRect& devBounds, const SkRect& devBounds45,
+                               const SkIRect& devIBounds, const SkIVector& maskShift);
 
     // Called once our path mask has been copied into a permanent, 8-bit atlas. This method points
-    // the entry at the new atlas and updates the CachedAtlasInfo data.
-    void updateToCachedAtlas(const GrUniqueKey& atlasKey, const SkIVector& newAtlasOffset,
-                             sk_sp<GrCCAtlas::CachedAtlasInfo>);
-
-    const GrUniqueKey& atlasKey() const { return fAtlasKey; }
-
-    void resetAtlasKeyAndInfo() {
-        fAtlasKey.reset();
-        fCachedAtlasInfo.reset();
-    }
-
-    // This is a utility for the caller to detect when a path gets drawn more than once during the
-    // same flush, with compatible matrices. Before adding a path to an atlas, the caller may check
-    // here to see if they have already placed the path previously during the same flush. The caller
-    // is required to reset all currFlushAtlas references back to null before any subsequent flush.
-    void setCurrFlushAtlas(const GrCCAtlas* currFlushAtlas) {
-        // This should not get called more than once in a single flush. Once fCurrFlushAtlas is
-        // non-null, it can only be set back to null (once the flush is over).
-        SkASSERT(!fCurrFlushAtlas || !currFlushAtlas);
-        fCurrFlushAtlas = currFlushAtlas;
-    }
-    const GrCCAtlas* currFlushAtlas() const { return fCurrFlushAtlas; }
+    // the entry at the new atlas and updates the GrCCCachedAtlas data.
+    ReleaseAtlasResult upgradeToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*,
+                                                     GrCCAtlas*, const SkIVector& newAtlasOffset);
 
 private:
     using MaskTransform = GrCCPathCache::MaskTransform;
@@ -212,32 +243,115 @@
 
     // Resets this entry back to not having an atlas, and purges its previous atlas texture from the
     // resource cache if needed.
-    void invalidateAtlas();
+    ReleaseAtlasResult releaseCachedAtlas(GrCCPathCache*);
 
     sk_sp<GrCCPathCache::Key> fCacheKey;
-
     GrStdSteadyClock::time_point fTimestamp;
     int fHitCount = 0;
-    MaskTransform fMaskTransform;
 
-    GrUniqueKey fAtlasKey;
+    sk_sp<GrCCCachedAtlas> fCachedAtlas;
     SkIVector fAtlasOffset;
 
+    MaskTransform fMaskTransform;
     SkRect fDevBounds;
     SkRect fDevBounds45;
     SkIRect fDevIBounds;
 
-    // If null, then we are referencing a "stashed" atlas (see initAsStashedAtlas()).
-    sk_sp<GrCCAtlas::CachedAtlasInfo> fCachedAtlasInfo;
-
-    // This field is for when a path gets drawn more than once during the same flush.
-    const GrCCAtlas* fCurrFlushAtlas = nullptr;
+    int fOnFlushRefCnt = 0;
 
     friend class GrCCPathCache;
     friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&,
                                                  GrColor, DoEvenOddFill);  // To access data.
+
+public:
+    int testingOnly_peekOnFlushRefCnt() const;
 };
 
+/**
+ * Encapsulates the data for an atlas whose texture is stored in the mainline GrResourceCache. Many
+ * instances of GrCCPathCacheEntry will reference the same GrCCCachedAtlas.
+ *
+ * We use this object to track the percentage of the original atlas pixels that could still ever
+ * potentially be reused (i.e., those which still represent an extant path). When the percentage
+ * of useful pixels drops below 50%, we purge the entire texture from the resource cache.
+ *
+ * This object also holds a ref on the atlas's actual texture proxy during flush. When
+ * fOnFlushRefCnt decrements back down to zero, we release fOnFlushProxy and reset it back to null.
+ */
+class GrCCCachedAtlas : public GrNonAtomicRef<GrCCCachedAtlas> {
+public:
+    using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
+
+    GrCCCachedAtlas(GrCCAtlas::CoverageType type, const GrUniqueKey& textureKey,
+                    sk_sp<GrTextureProxy> onFlushProxy)
+            : fCoverageType(type)
+            , fTextureKey(textureKey)
+            , fOnFlushProxy(std::move(onFlushProxy)) {}
+
+    ~GrCCCachedAtlas() {
+        SkASSERT(!fOnFlushProxy);
+        SkASSERT(!fOnFlushRefCnt);
+    }
+
+    GrCCAtlas::CoverageType coverageType() const  { return fCoverageType; }
+    const GrUniqueKey& textureKey() const { return fTextureKey; }
+
+    GrTextureProxy* getOnFlushProxy() const { return fOnFlushProxy.get(); }
+
+    void setOnFlushProxy(sk_sp<GrTextureProxy> proxy) {
+        SkASSERT(!fOnFlushProxy);
+        fOnFlushProxy = std::move(proxy);
+    }
+
+    void addPathPixels(int numPixels) { fNumPathPixels += numPixels; }
+    ReleaseAtlasResult invalidatePathPixels(GrCCPathCache*, int numPixels);
+
+    int peekOnFlushRefCnt() const { return fOnFlushRefCnt; }
+    void incrOnFlushRefCnt(int count = 1) const {
+        SkASSERT(count > 0);
+        SkASSERT(fOnFlushProxy);
+        fOnFlushRefCnt += count;
+    }
+    void decrOnFlushRefCnt(int count = 1) const;
+
+private:
+    const GrCCAtlas::CoverageType fCoverageType;
+    const GrUniqueKey fTextureKey;
+
+    int fNumPathPixels = 0;
+    int fNumInvalidatedPathPixels = 0;
+    bool fIsInvalidatedFromResourceCache = false;
+
+    mutable sk_sp<GrTextureProxy> fOnFlushProxy;
+    mutable int fOnFlushRefCnt = 0;
+
+public:
+    int testingOnly_peekOnFlushRefCnt() const;
+};
+
+
+inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
+                                         const MaskTransform& m, const GrShape& shape)
+        : fPathCache(pathCache)
+        , fEntry(new GrCCPathCacheEntry(key, m)) {
+    SkASSERT(shape.hasUnstyledKey());
+    shape.addGenIDChangeListener(std::move(key));
+}
+
+inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
+        const GrCCPathCache::HashNode& node) {
+    return *node.entry()->fCacheKey;
+}
+
+inline GrCCPathCache::HashNode::~HashNode() {
+    SkASSERT(!fEntry || !fEntry->fCachedAtlas);  // Should have called GrCCPathCache::evict().
+}
+
+inline void GrCCPathCache::HashNode::operator=(HashNode&& node) {
+    SkASSERT(!fEntry || !fEntry->fCachedAtlas);  // Should have called GrCCPathCache::evict().
+    fEntry = skstd::exchange(node.fEntry, nullptr);
+}
+
 inline void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry& entry,
                                              const SkIVector& shift, GrColor color,
                                              DoEvenOddFill doEvenOddFill) {
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.cpp b/src/gpu/ccpr/GrCCPerFlushResources.cpp
index 41cd2e2..e6cf8bb 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.cpp
+++ b/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -33,8 +33,9 @@
         return RequiresDstTexture::kNo;
     }
     CombineResult onCombineIfPossible(GrOp* other, const GrCaps&) override {
-        SK_ABORT("Only expected one Op per CCPR atlas.");
-        return CombineResult::kMerged;
+        // We will only make multiple copy ops if they have different source proxies.
+        // TODO: make use of texture chaining.
+        return CombineResult::kCannotCombine;
     }
     void onPrepare(GrOpFlushState*) override {}
 
@@ -50,7 +51,7 @@
     const sk_sp<const GrCCPerFlushResources> fResources;
 };
 
-// Copies paths from a stashed coverage count atlas into an 8-bit literal-coverage atlas.
+// Copies paths from a cached coverage count atlas into an 8-bit literal-coverage atlas.
 class CopyAtlasOp : public AtlasOp {
 public:
     DEFINE_OP_CLASS_ID
@@ -66,18 +67,16 @@
     }
 
     const char* name() const override { return "CopyAtlasOp (CCPR)"; }
-    void visitProxies(const VisitProxyFunc& fn, VisitorType) const override {
-        fn(fStashedAtlasProxy.get());
-    }
+    void visitProxies(const VisitProxyFunc& fn, VisitorType) const override { fn(fSrcProxy.get()); }
 
     void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
-        SkASSERT(fStashedAtlasProxy);
+        SkASSERT(fSrcProxy);
         GrPipeline::FixedDynamicState dynamicState;
-        auto atlasProxy = fStashedAtlasProxy.get();
-        dynamicState.fPrimitiveProcessorTextures = &atlasProxy;
+        auto srcProxy = fSrcProxy.get();
+        dynamicState.fPrimitiveProcessorTextures = &srcProxy;
 
         GrPipeline pipeline(flushState->proxy(), GrScissorTest::kDisabled, SkBlendMode::kSrc);
-        GrCCPathProcessor pathProc(atlasProxy);
+        GrCCPathProcessor pathProc(srcProxy);
         pathProc.drawPaths(flushState, pipeline, &dynamicState, *fResources, fBaseInstance,
                            fEndInstance, this->bounds());
     }
@@ -85,15 +84,14 @@
 private:
     friend class ::GrOpMemoryPool; // for ctor
 
-    CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> copyProxy,
+    CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> srcProxy,
                 int baseInstance, int endInstance, const SkISize& drawBounds)
             : AtlasOp(ClassID(), std::move(resources), drawBounds)
-            , fStashedAtlasProxy(copyProxy)
+            , fSrcProxy(srcProxy)
             , fBaseInstance(baseInstance)
             , fEndInstance(endInstance) {
     }
-
-    sk_sp<GrTextureProxy> fStashedAtlasProxy;
+    sk_sp<GrTextureProxy> fSrcProxy;
     const int fBaseInstance;
     const int fEndInstance;
 };
@@ -161,9 +159,10 @@
         , fStroker(specs.fNumRenderedPaths[kStrokeIdx],
                    specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkPoints,
                    specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkVerbs)
-        , fCopyAtlasStack(kAlpha_8_GrPixelConfig, specs.fCopyAtlasSpecs, onFlushRP->caps())
-        , fRenderedAtlasStack(kAlpha_half_GrPixelConfig, specs.fRenderedAtlasSpecs,
-                              onFlushRP->caps())
+        , fCopyAtlasStack(GrCCAtlas::CoverageType::kA8_LiteralCoverage, specs.fCopyAtlasSpecs,
+                          onFlushRP->caps())
+        , fRenderedAtlasStack(GrCCAtlas::CoverageType::kFP16_CoverageCount,
+                              specs.fRenderedAtlasSpecs, onFlushRP->caps())
         , fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
         , fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
         , fInstanceBuffer(onFlushRP->makeBuffer(kVertex_GrBufferType,
@@ -190,21 +189,84 @@
     SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
 }
 
-GrCCAtlas* GrCCPerFlushResources::copyPathToCachedAtlas(const GrCCPathCacheEntry& entry,
-                                                        GrCCPathProcessor::DoEvenOddFill evenOdd,
-                                                        SkIVector* newAtlasOffset) {
+void GrCCPerFlushResources::upgradeEntryToLiteralCoverageAtlas(
+        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCPathCacheEntry* entry,
+        GrCCPathProcessor::DoEvenOddFill evenOdd) {
+    using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
     SkASSERT(this->isMapped());
     SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);
-    SkASSERT(!entry.hasCachedAtlas());  // Unexpected, but not necessarily a problem.
 
-    if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry.devIBounds(), newAtlasOffset)) {
-        // We did not fit in the previous copy atlas and it was retired. We will render the copies
-        // up until fNextCopyInstanceIdx into the retired atlas during finalize().
-        retiredAtlas->setFillBatchID(fNextCopyInstanceIdx);
+    const GrCCCachedAtlas* cachedAtlas = entry->cachedAtlas();
+    SkASSERT(cachedAtlas);
+    SkASSERT(cachedAtlas->getOnFlushProxy());
+
+    if (GrCCAtlas::CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
+        // This entry has already been upgraded to literal coverage. The path must have been drawn
+        // multiple times during the flush.
+        SkDEBUGCODE(--fEndCopyInstance);
+        return;
     }
 
-    fPathInstanceData[fNextCopyInstanceIdx++].set(entry, *newAtlasOffset, GrColor_WHITE, evenOdd);
-    return &fCopyAtlasStack.current();
+    SkIVector newAtlasOffset;
+    if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry->devIBounds(), &newAtlasOffset)) {
+        // We did not fit in the previous copy atlas and it was retired. We will render the ranges
+        // up until fCopyPathRanges.count() into the retired atlas during finalize().
+        retiredAtlas->setFillBatchID(fCopyPathRanges.count());
+        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
+    }
+
+    this->recordCopyPathInstance(*entry, newAtlasOffset, evenOdd,
+                                 sk_ref_sp(cachedAtlas->getOnFlushProxy()));
+
+    sk_sp<GrTexture> previousAtlasTexture =
+            sk_ref_sp(cachedAtlas->getOnFlushProxy()->peekTexture());
+    GrCCAtlas* newAtlas = &fCopyAtlasStack.current();
+    if (ReleaseAtlasResult::kDidInvalidateFromCache ==
+            entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, newAtlas, newAtlasOffset)) {
+        // This texture just got booted out of the cache. Keep it around in case we can recycle it
+        // for a new atlas. We can recycle it because copying happens before rendering new paths,
+        // and every path from the atlas that we plan to use this flush will be copied to a new
+        // atlas. We'll never copy some and leave others.
+        fRecyclableAtlasTextures.push_back(std::move(previousAtlasTexture));
+    }
+}
+
+template<typename T, typename... Args>
+static void emplace_at_memcpy(SkTArray<T>* array, int idx, Args&&... args) {
+    if (int moveCount = array->count() - idx) {
+        array->push_back();
+        T* location = array->begin() + idx;
+        memcpy(location+1, location, moveCount * sizeof(T));
+        new (location) T(std::forward<Args>(args)...);
+    } else {
+        array->emplace_back(std::forward<Args>(args)...);
+    }
+}
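
For reference, an equivalent insert-at-index helper written against std::vector (an illustrative sketch, not the Skia helper): it relies on memmove because the shifted region overlaps itself, and like the helper above it is only safe for trivially relocatable element types.

    #include <cstddef>
    #include <cstring>
    #include <new>
    #include <utility>
    #include <vector>

    // Opens a slot at 'idx' by relocating the tail one element forward, then constructs in place.
    // Only valid for trivially relocatable T (plain structs, raw pointers, and the like).
    template <typename T, typename... Args>
    void emplace_at_sketch(std::vector<T>* array, size_t idx, Args&&... args) {
        if (size_t moveCount = array->size() - idx) {
            array->push_back(T());  // Grow by one; the placeholder is overwritten by the shift below.
            T* location = array->data() + idx;
            std::memmove(location + 1, location, moveCount * sizeof(T));  // Shift the tail up.
            new (location) T(std::forward<Args>(args)...);  // Construct the new element in the gap.
        } else {
            array->emplace_back(std::forward<Args>(args)...);
        }
    }
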
+
+void GrCCPerFlushResources::recordCopyPathInstance(const GrCCPathCacheEntry& entry,
+                                                   const SkIVector& newAtlasOffset,
+                                                   GrCCPathProcessor::DoEvenOddFill evenOdd,
+                                                   sk_sp<GrTextureProxy> srcProxy) {
+    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);
+
+    // Write the instance at the back of the array.
+    int currentInstanceIdx = fNextCopyInstanceIdx++;
+    fPathInstanceData[currentInstanceIdx].set(entry, newAtlasOffset, GrColor_WHITE, evenOdd);
+
+    // Percolate the instance forward until it's contiguous with other instances that share the same
+    // proxy.
+    for (int i = fCopyPathRanges.count() - 1; i >= fCurrCopyAtlasRangesIdx; --i) {
+        if (fCopyPathRanges[i].fSrcProxy == srcProxy) {
+            ++fCopyPathRanges[i].fCount;
+            return;
+        }
+        int rangeFirstInstanceIdx = currentInstanceIdx - fCopyPathRanges[i].fCount;
+        std::swap(fPathInstanceData[rangeFirstInstanceIdx], fPathInstanceData[currentInstanceIdx]);
+        currentInstanceIdx = rangeFirstInstanceIdx;
+    }
+
+    // An instance with this particular proxy did not yet exist in the array. Add a range for it.
+    emplace_at_memcpy(&fCopyPathRanges, fCurrCopyAtlasRangesIdx, std::move(srcProxy), 1);
 }
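
As a rough illustration of the invariant recordCopyPathInstance() maintains (ints stand in for proxies; this sketch models a single copy atlas region and is not the Skia code): a newly written instance swaps toward the front, past runs with a different source, until it either joins the run that shares its source or starts a new single-element run.

    #include <utility>
    #include <vector>

    struct RangeSketch { int src; int count; };  // Stand-in for CopyPathRange.

    // Appends 'instance' (tagged with source 'src') while keeping instances grouped by source.
    void record_sketch(std::vector<int>* instances, std::vector<RangeSketch>* ranges,
                       int src, int instance) {
        instances->push_back(instance);
        int currentIdx = (int)instances->size() - 1;
        for (int i = (int)ranges->size() - 1; i >= 0; --i) {
            if ((*ranges)[i].src == src) {
                ++(*ranges)[i].count;  // The new instance joins this run.
                return;
            }
            // Swap past a run that does not share our source.
            int runFirstIdx = currentIdx - (*ranges)[i].count;
            std::swap((*instances)[runFirstIdx], (*instances)[currentIdx]);
            currentIdx = runFirstIdx;
        }
        ranges->insert(ranges->begin(), RangeSketch{src, 1});  // Start a new run at the front.
    }

Recording sources A, B, A in that order, for example, ends with the instances laid out as B, A, A and ranges {B:1}, {A:2}; keeping each range contiguous is what lets finalize() issue one copy draw per source proxy.
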
 
 static bool transform_path_pts(const SkMatrix& m, const SkPath& path,
@@ -263,7 +325,7 @@
     return true;
 }
 
-const GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
+GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
         const SkIRect& clipIBounds, const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
         SkRect* devBounds, SkRect* devBounds45, SkIRect* devIBounds, SkIVector* devToAtlasOffset) {
     SkASSERT(this->isMapped());
@@ -361,17 +423,17 @@
 }
 
 bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP,
-                                     sk_sp<GrTextureProxy> stashedAtlasProxy,
                                      SkTArray<sk_sp<GrRenderTargetContext>>* out) {
     SkASSERT(this->isMapped());
     SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
-    // No assert for fEndCopyInstance because the caller may have detected and skipped duplicates.
+    SkASSERT(fNextCopyInstanceIdx == fEndCopyInstance);
 
     fInstanceBuffer->unmap();
     fPathInstanceData = nullptr;
 
     if (!fCopyAtlasStack.empty()) {
-        fCopyAtlasStack.current().setFillBatchID(fNextCopyInstanceIdx);
+        fCopyAtlasStack.current().setFillBatchID(fCopyPathRanges.count());
+        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
     }
     if (!fRenderedAtlasStack.empty()) {
         fRenderedAtlasStack.current().setFillBatchID(fFiller.closeCurrentBatch());
@@ -387,38 +449,44 @@
         return false;
     }
 
-    // Draw the copies from the stashed atlas into 8-bit cached atlas(es).
+    // Draw the copies from the 16-bit coverage count atlas(es) into cached 8-bit literal-coverage
+    // atlas(es).
+    int copyRangeIdx = 0;
     int baseCopyInstance = 0;
     for (GrCCAtlasStack::Iter atlas(fCopyAtlasStack); atlas.next();) {
-        int endCopyInstance = atlas->getFillBatchID();
-        if (endCopyInstance <= baseCopyInstance) {
-            SkASSERT(endCopyInstance == baseCopyInstance);
-            continue;
+        int endCopyRange = atlas->getFillBatchID();
+        SkASSERT(endCopyRange > copyRangeIdx);
+
+        sk_sp<GrRenderTargetContext> rtc = atlas->makeRenderTargetContext(onFlushRP);
+        for (; copyRangeIdx < endCopyRange; ++copyRangeIdx) {
+            const CopyPathRange& copyRange = fCopyPathRanges[copyRangeIdx];
+            int endCopyInstance = baseCopyInstance + copyRange.fCount;
+            if (rtc) {
+                auto op = CopyAtlasOp::Make(rtc->surfPriv().getContext(), sk_ref_sp(this),
+                                            copyRange.fSrcProxy, baseCopyInstance, endCopyInstance,
+                                            atlas->drawBounds());
+                rtc->addDrawOp(GrNoClip(), std::move(op));
+            }
+            baseCopyInstance = endCopyInstance;
         }
-        if (auto rtc = atlas->makeRenderTargetContext(onFlushRP)) {
-            GrContext* ctx = rtc->surfPriv().getContext();
-            auto op = CopyAtlasOp::Make(ctx, sk_ref_sp(this), stashedAtlasProxy, baseCopyInstance,
-                                        endCopyInstance, atlas->drawBounds());
-            rtc->addDrawOp(GrNoClip(), std::move(op));
-            out->push_back(std::move(rtc));
-        }
-        baseCopyInstance = endCopyInstance;
+        out->push_back(std::move(rtc));
     }
+    SkASSERT(fCopyPathRanges.count() == copyRangeIdx);
+    SkASSERT(fNextCopyInstanceIdx == baseCopyInstance);
+    SkASSERT(baseCopyInstance == fEndCopyInstance);
 
     // Render the coverage count atlas(es).
     for (GrCCAtlasStack::Iter atlas(fRenderedAtlasStack); atlas.next();) {
-        // Copies will be finished by the time we get to this atlas. See if we can recycle the
-        // stashed atlas texture instead of creating a new one.
+        // Copies will be finished by the time we get to rendering new atlases. See if we can
+        // recycle any previously invalidated atlas textures instead of creating new ones.
         sk_sp<GrTexture> backingTexture;
-        if (stashedAtlasProxy && atlas->currentWidth() == stashedAtlasProxy->width() &&
-            atlas->currentHeight() == stashedAtlasProxy->height()) {
-            backingTexture = sk_ref_sp(stashedAtlasProxy->peekTexture());
+        for (sk_sp<GrTexture>& texture : fRecyclableAtlasTextures) {
+            if (texture && atlas->currentHeight() == texture->height() &&
+                    atlas->currentWidth() == texture->width()) {
+                backingTexture = skstd::exchange(texture, nullptr);
+                break;
+            }
         }
 
-        // Delete the stashed proxy here. That way, if we can't recycle the stashed atlas texture,
-        // we free this memory prior to allocating a new backing texture.
-        stashedAtlasProxy = nullptr;
-
         if (auto rtc = atlas->makeRenderTargetContext(onFlushRP, std::move(backingTexture))) {
             auto op = RenderAtlasOp::Make(rtc->surfPriv().getContext(), sk_ref_sp(this),
                                           atlas->getFillBatchID(), atlas->getStrokeBatchID(),
@@ -431,23 +499,10 @@
     return true;
 }
 
-void GrCCPerFlushResourceSpecs::convertCopiesToRenders() {
-    for (int i = 0; i < 2; ++i) {
-        fNumRenderedPaths[i] += fNumCopiedPaths[i];
-        fNumCopiedPaths[i] = 0;
-
-        fRenderedPathStats[i].fMaxPointsPerPath =
-               SkTMax(fRenderedPathStats[i].fMaxPointsPerPath, fCopyPathStats[i].fMaxPointsPerPath);
-        fRenderedPathStats[i].fNumTotalSkPoints += fCopyPathStats[i].fNumTotalSkPoints;
-        fRenderedPathStats[i].fNumTotalSkVerbs += fCopyPathStats[i].fNumTotalSkVerbs;
-        fRenderedPathStats[i].fNumTotalConicWeights += fCopyPathStats[i].fNumTotalConicWeights;
-        fCopyPathStats[i] = GrCCRenderedPathStats();
-    }
-
-    fRenderedAtlasSpecs.fApproxNumPixels += fCopyAtlasSpecs.fApproxNumPixels;
-    fRenderedAtlasSpecs.fMinWidth =
-            SkTMax(fRenderedAtlasSpecs.fMinWidth, fCopyAtlasSpecs.fMinWidth);
-    fRenderedAtlasSpecs.fMinHeight =
-            SkTMax(fRenderedAtlasSpecs.fMinHeight, fCopyAtlasSpecs.fMinHeight);
+void GrCCPerFlushResourceSpecs::cancelCopies() {
+    // Convert copies to cached draws.
+    fNumCachedPaths += fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx];
+    fNumCopiedPaths[kFillIdx] = fNumCopiedPaths[kStrokeIdx] = 0;
+    fCopyPathStats[kFillIdx] = fCopyPathStats[kStrokeIdx] = GrCCRenderedPathStats();
     fCopyAtlasSpecs = GrCCAtlas::Specs();
 }
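
The consumption side of those ranges in finalize() can be sketched on its own (the names and the callback are illustrative, not the Skia API): each copy atlas records the end index into the range list as its fill batch ID, and the flush walks the ranges per atlas, issuing one copy draw per source while accumulating the base instance index.

    #include <vector>

    struct CopyRangeSketch { int srcProxyId; int count; };

    // Mirrors the copy-draw loop in GrCCPerFlushResources::finalize(): 'endRangePerAtlas[i]' is
    // the (exclusive) end index into 'ranges' for atlas i; issueCopyOp is a hypothetical callback.
    template <typename IssueCopyOp>
    void flush_copy_ranges_sketch(const std::vector<CopyRangeSketch>& ranges,
                                  const std::vector<int>& endRangePerAtlas,
                                  IssueCopyOp issueCopyOp) {
        int rangeIdx = 0;
        int baseInstance = 0;
        for (int atlasIdx = 0; atlasIdx < (int)endRangePerAtlas.size(); ++atlasIdx) {
            for (; rangeIdx < endRangePerAtlas[atlasIdx]; ++rangeIdx) {
                int endInstance = baseInstance + ranges[rangeIdx].count;
                issueCopyOp(atlasIdx, ranges[rangeIdx].srcProxyId, baseInstance, endInstance);
                baseInstance = endInstance;
            }
        }
    }
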
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.h b/src/gpu/ccpr/GrCCPerFlushResources.h
index 132068f..f363c16 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.h
+++ b/src/gpu/ccpr/GrCCPerFlushResources.h
@@ -14,6 +14,7 @@
 #include "ccpr/GrCCStroker.h"
 #include "ccpr/GrCCPathProcessor.h"
 
+class GrCCPathCache;
 class GrCCPathCacheEntry;
 class GrOnFlushResourceProvider;
 class GrShape;
@@ -53,7 +54,8 @@
         return 0 == fNumCachedPaths + fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx] +
                     fNumRenderedPaths[kFillIdx] + fNumRenderedPaths[kStrokeIdx] + fNumClipPaths;
     }
-    void convertCopiesToRenders();
+    // Converts the copies to normal cached draws.
+    void cancelCopies();
 };
 
 /**
@@ -67,22 +69,19 @@
 
     bool isMapped() const { return SkToBool(fPathInstanceData); }
 
-    // Copies a path out of the the previous flush's stashed mainline coverage count atlas, and into
-    // a cached, 8-bit, literal-coverage atlas. The actual source texture to copy from will be
-    // provided at the time finalize() is called.
-    GrCCAtlas* copyPathToCachedAtlas(const GrCCPathCacheEntry&, GrCCPathProcessor::DoEvenOddFill,
-                                     SkIVector* newAtlasOffset);
+    // Copies a coverage-counted path out of the given texture proxy, and into a cached, 8-bit,
+    // literal coverage atlas. Updates the cache entry to reference the new atlas.
+    void upgradeEntryToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*,
+                                            GrCCPathCacheEntry*, GrCCPathProcessor::DoEvenOddFill);
 
     // These two methods render a path into a temporary coverage count atlas. See
-    // GrCCPathProcessor::Instance for a description of the outputs. The returned atlases are
-    // "const" to prevent the caller from assigning a unique key.
+    // GrCCPathProcessor::Instance for a description of the outputs.
     //
     // strokeDevWidth must be 0 for fills, 1 for hairlines, or the stroke width in device-space
     // pixels for non-hairline strokes (implicitly requiring a rigid-body transform).
-    const GrCCAtlas* renderShapeInAtlas(const SkIRect& clipIBounds, const SkMatrix&, const GrShape&,
-                                        float strokeDevWidth, SkRect* devBounds,
-                                        SkRect* devBounds45, SkIRect* devIBounds,
-                                        SkIVector* devToAtlasOffset);
+    GrCCAtlas* renderShapeInAtlas(const SkIRect& clipIBounds, const SkMatrix&, const GrShape&,
+                                  float strokeDevWidth, SkRect* devBounds, SkRect* devBounds45,
+                                  SkIRect* devIBounds, SkIVector* devToAtlasOffset);
     const GrCCAtlas* renderDeviceSpacePathInAtlas(const SkIRect& clipIBounds, const SkPath& devPath,
                                                   const SkIRect& devPathIBounds,
                                                   SkIVector* devToAtlasOffset);
@@ -100,11 +99,8 @@
         return fPathInstanceData[fNextPathInstanceIdx++];
     }
 
-    // Finishes off the GPU buffers and renders the atlas(es). 'stashedAtlasProxy', if provided, is
-    // the mainline coverage count atlas from the previous flush. It will be used as the source
-    // texture for any copies setup by copyStashedPathToAtlas().
-    bool finalize(GrOnFlushResourceProvider*, sk_sp<GrTextureProxy> stashedAtlasProxy,
-                  SkTArray<sk_sp<GrRenderTargetContext>>* out);
+    // Finishes off the GPU buffers and renders the atlas(es).
+    bool finalize(GrOnFlushResourceProvider*, SkTArray<sk_sp<GrRenderTargetContext>>* out);
 
     // Accessors used by draw calls, once the resources have been finalized.
     const GrCCFiller& filler() const { SkASSERT(!this->isMapped()); return fFiller; }
@@ -113,23 +109,9 @@
     const GrBuffer* vertexBuffer() const { SkASSERT(!this->isMapped()); return fVertexBuffer.get();}
     GrBuffer* instanceBuffer() const { SkASSERT(!this->isMapped()); return fInstanceBuffer.get(); }
 
-    // Returns the mainline coverage count atlas that the client may stash for next flush, if any.
-    // The caller is responsible to call getOrAssignUniqueKey() on this atlas if they wish to
-    // actually stash it in order to copy paths into cached atlases.
-    GrCCAtlas* nextAtlasToStash() {
-        return fRenderedAtlasStack.empty() ? nullptr : &fRenderedAtlasStack.front();
-    }
-
-    // Returs true if the client has called getOrAssignUniqueKey() on our nextAtlasToStash().
-    bool hasStashedAtlas() const {
-        return !fRenderedAtlasStack.empty() && fRenderedAtlasStack.front().uniqueKey().isValid();
-    }
-    const GrUniqueKey& stashedAtlasKey() const  {
-        SkASSERT(this->hasStashedAtlas());
-        return fRenderedAtlasStack.front().uniqueKey();
-    }
-
 private:
+    void recordCopyPathInstance(const GrCCPathCacheEntry&, const SkIVector& newAtlasOffset,
+                                GrCCPathProcessor::DoEvenOddFill, sk_sp<GrTextureProxy> srcProxy);
     bool placeRenderedPathInAtlas(const SkIRect& clipIBounds, const SkIRect& pathIBounds,
                                   GrScissorTest*, SkIRect* clippedPathIBounds,
                                   SkIVector* devToAtlasOffset);
@@ -149,6 +131,30 @@
     SkDEBUGCODE(int fEndCopyInstance);
     int fNextPathInstanceIdx;
     SkDEBUGCODE(int fEndPathInstance);
+
+    // Represents a range of copy-path instances that all share the same source proxy. (i.e. Draw
+    // instances that copy a path mask from a 16-bit coverage count atlas into an 8-bit literal
+    // coverage atlas.)
+    struct CopyPathRange {
+        CopyPathRange() = default;
+        CopyPathRange(sk_sp<GrTextureProxy> srcProxy, int count)
+                : fSrcProxy(std::move(srcProxy)), fCount(count) {}
+        sk_sp<GrTextureProxy> fSrcProxy;
+        int fCount;
+    };
+
+    SkSTArray<4, CopyPathRange> fCopyPathRanges;
+    int fCurrCopyAtlasRangesIdx = 0;
+
+    // This is a list of coverage count atlas textures that have been invalidated due to us copying
+    // their paths into new 8-bit literal coverage atlases. Since copying is finished by the time
+    // we begin rendering new atlases, we can recycle these textures for the rendered atlases rather
+    // than allocating new texture objects upon instantiation.
+    SkSTArray<2, sk_sp<GrTexture>> fRecyclableAtlasTextures;
+
+public:
+    const GrTexture* testingOnly_frontCopyAtlasTexture() const;
+    const GrTexture* testingOnly_frontRenderedAtlasTexture() const;
 };
 
 inline void GrCCRenderedPathStats::statPath(const SkPath& path) {
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 293d8c2..901ca38 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -30,15 +30,16 @@
 }
 
 sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
-        const GrCaps& caps, AllowCaching allowCaching) {
+        const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
     return sk_sp<GrCoverageCountingPathRenderer>((IsSupported(caps))
-            ? new GrCoverageCountingPathRenderer(allowCaching)
+            ? new GrCoverageCountingPathRenderer(allowCaching, contextUniqueID)
             : nullptr);
 }
 
-GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching) {
+GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching,
+                                                               uint32_t contextUniqueID) {
     if (AllowCaching::kYes == allowCaching) {
-        fPathCache = skstd::make_unique<GrCCPathCache>();
+        fPathCache = skstd::make_unique<GrCCPathCache>(contextUniqueID);
     }
 }
 
@@ -188,29 +189,16 @@
 void GrCoverageCountingPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
                                               const uint32_t* opListIDs, int numOpListIDs,
                                               SkTArray<sk_sp<GrRenderTargetContext>>* out) {
-    using DoCopiesToCache = GrCCDrawPathsOp::DoCopiesToCache;
+    using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
     SkASSERT(!fFlushing);
     SkASSERT(fFlushingPaths.empty());
     SkDEBUGCODE(fFlushing = true);
 
-    // Dig up the stashed atlas from the previous flush (if any) so we can attempt to copy any
-    // reusable paths out of it and into the resource cache. We also need to clear its unique key.
-    sk_sp<GrTextureProxy> stashedAtlasProxy;
-    if (fStashedAtlasKey.isValid()) {
-        stashedAtlasProxy = onFlushRP->findOrCreateProxyByUniqueKey(fStashedAtlasKey,
-                                                                    GrCCAtlas::kTextureOrigin);
-        if (stashedAtlasProxy) {
-            // Instantiate the proxy so we can clear the underlying texture's unique key.
-            onFlushRP->instatiateProxy(stashedAtlasProxy.get());
-            SkASSERT(fStashedAtlasKey == stashedAtlasProxy->getUniqueKey());
-            onFlushRP->removeUniqueKeyFromProxy(stashedAtlasProxy.get());
-        } else {
-            fStashedAtlasKey.reset();  // Indicate there is no stashed atlas to copy from.
-        }
+    if (fPathCache) {
+        fPathCache->doPreFlushProcessing();
     }
 
     if (fPendingPaths.empty()) {
-        fStashedAtlasKey.reset();
         return;  // Nothing to draw.
     }
 
@@ -234,13 +222,12 @@
         fPendingPaths.erase(iter);
 
         for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
-            op->accountForOwnPaths(fPathCache.get(), onFlushRP, fStashedAtlasKey, &specs);
+            op->accountForOwnPaths(fPathCache.get(), onFlushRP, &specs);
         }
         for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
             clipsIter.second.accountForOwnPath(&specs);
         }
     }
-    fStashedAtlasKey.reset();
 
     if (specs.isEmpty()) {
         return;  // Nothing to draw.
@@ -250,12 +237,10 @@
     // copy them to cached atlas(es).
     int numCopies = specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kFillIdx] +
                     specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kStrokeIdx];
-    DoCopiesToCache doCopies = DoCopiesToCache(numCopies > 100 ||
-                                               specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
-    if (numCopies && DoCopiesToCache::kNo == doCopies) {
-        specs.convertCopiesToRenders();
-        SkASSERT(!specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kFillIdx]);
-        SkASSERT(!specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kStrokeIdx]);
+    auto doCopies = DoCopiesToA8Coverage(numCopies > 100 ||
+                                         specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
+    if (numCopies && DoCopiesToA8Coverage::kNo == doCopies) {
+        specs.cancelCopies();
     }
 
     auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, specs);
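
The copy threshold above can be read as a small predicate (a sketch with the thresholds taken from the diff; the names are stand-ins, not the Skia API): a handful of copies is not worth a separate copy pass, so in that case the paths simply keep drawing from their 16-bit coverage count atlases.

    enum class DoCopiesSketch : bool { kNo = false, kYes = true };

    // Copy to 8-bit literal coverage only when enough paths (or pixels) are involved to justify
    // the extra render pass. Thresholds match the ones used in preFlush() above.
    inline DoCopiesSketch should_copy_to_a8_sketch(int numCopiedPaths, int approxCopyAtlasPixels) {
        return DoCopiesSketch(numCopiedPaths > 100 || approxCopyAtlasPixels > 256 * 256);
    }
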
@@ -266,20 +251,23 @@
     // Layout the atlas(es) and parse paths.
     for (const auto& flushingPaths : fFlushingPaths) {
         for (GrCCDrawPathsOp* op : flushingPaths->fDrawOps) {
-            op->setupResources(onFlushRP, resources.get(), doCopies);
+            op->setupResources(fPathCache.get(), onFlushRP, resources.get(), doCopies);
         }
         for (auto& clipsIter : flushingPaths->fClipPaths) {
             clipsIter.second.renderPathInAtlas(resources.get(), onFlushRP);
         }
     }
 
-    // Allocate resources and then render the atlas(es).
-    if (!resources->finalize(onFlushRP, std::move(stashedAtlasProxy), out)) {
-        return;
+    if (fPathCache) {
+        // Purge invalidated textures from previous atlases *before* calling finalize(). That way,
+        // the underlying textures objects can be freed up and reused for the next atlases.
+        fPathCache->purgeInvalidatedAtlasTextures(onFlushRP);
     }
 
-    // Verify the stashed atlas got released so its texture could be recycled.
-    SkASSERT(!stashedAtlasProxy);  // NOLINT(bugprone-use-after-move)
+    // Allocate resources and then render the atlas(es).
+    if (!resources->finalize(onFlushRP, out)) {
+        return;
+    }
 
     // Commit flushing paths to the resources once they are successfully completed.
     for (auto& flushingPaths : fFlushingPaths) {
@@ -291,15 +279,8 @@
 void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
                                                int numOpListIDs) {
     SkASSERT(fFlushing);
-    SkASSERT(!fStashedAtlasKey.isValid());  // Should have been cleared in preFlush().
 
     if (!fFlushingPaths.empty()) {
-        // Note the stashed atlas's key for next flush, if any.
-        auto resources = fFlushingPaths.front()->fFlushResources.get();
-        if (resources && resources->hasStashedAtlas()) {
-            fStashedAtlasKey = resources->stashedAtlasKey();
-        }
-
         // In DDL mode these aren't guaranteed to be deleted so we must clear out the perFlush
         // resources manually.
         for (auto& flushingPaths : fFlushingPaths) {
@@ -310,17 +291,13 @@
         fFlushingPaths.reset();
     }
 
-    if (fPathCache) {
-        fPathCache->doPostFlushProcessing();
-    }
-
     SkDEBUGCODE(fFlushing = false);
 }
 
 void GrCoverageCountingPathRenderer::purgeCacheEntriesOlderThan(
-        const GrStdSteadyClock::time_point& purgeTime) {
+        GrProxyProvider* proxyProvider, const GrStdSteadyClock::time_point& purgeTime) {
     if (fPathCache) {
-        fPathCache->purgeEntriesOlderThan(purgeTime);
+        fPathCache->purgeEntriesOlderThan(proxyProvider, purgeTime);
     }
 }
 
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 554404d..b5fb321 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -34,7 +34,8 @@
         kYes = true
     };
 
-    static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching);
+    static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching,
+                                                                   uint32_t contextUniqueID);
 
     using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
 
@@ -65,10 +66,7 @@
                   SkTArray<sk_sp<GrRenderTargetContext>>* out) override;
     void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
 
-    void purgeCacheEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime);
-
-    void testingOnly_drawPathDirectly(const DrawPathArgs&);
-    const GrUniqueKey& testingOnly_getStashedAtlasKey() const;
+    void purgeCacheEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point&);
 
     // If a path spans more pixels than this, we need to crop it or else analytic AA can run out of
     // fp32 precision.
@@ -84,7 +82,7 @@
                                    float* inflationRadius = nullptr);
 
 private:
-    GrCoverageCountingPathRenderer(AllowCaching);
+    GrCoverageCountingPathRenderer(AllowCaching, uint32_t contextUniqueID);
 
     // GrPathRenderer overrides.
     StencilSupport onGetStencilSupport(const GrShape&) const override {
@@ -106,9 +104,13 @@
     SkSTArray<4, sk_sp<GrCCPerOpListPaths>> fFlushingPaths;
 
     std::unique_ptr<GrCCPathCache> fPathCache;
-    GrUniqueKey fStashedAtlasKey;
 
     SkDEBUGCODE(bool fFlushing = false);
+
+public:
+    void testingOnly_drawPathDirectly(const DrawPathArgs&);
+    const GrCCPerFlushResources* testingOnly_getCurrentFlushResources();
+    const GrCCPathCache* testingOnly_getPathCache() const;
 };
 
 #endif
diff --git a/src/gpu/mock/GrMockGpu.cpp b/src/gpu/mock/GrMockGpu.cpp
index 4f2c4cd..b98af80 100644
--- a/src/gpu/mock/GrMockGpu.cpp
+++ b/src/gpu/mock/GrMockGpu.cpp
@@ -15,7 +15,11 @@
 
 int GrMockGpu::NextInternalTextureID() {
     static std::atomic<int> nextID{1};
-    return nextID++;
+    int id;
+    do {
+        id = nextID.fetch_add(1);
+    } while (0 == id);  // Reserve 0 for an invalid ID.
+    return id;
 }
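
A brief note on why 0 is now skipped (the caller below is hypothetical, not the Skia code): consumers can treat an ID of 0 as a cheap "not yet assigned" sentinel, which is only sound if NextInternalTextureID() can never hand it out.

    // Hypothetical caller: 0 doubles as "unassigned", so it must never collide with a real ID.
    struct MockTextureSlotSketch {
        int fTextureID = 0;  // 0 == invalid / not yet assigned.
        bool isAssigned() const { return 0 != fTextureID; }
    };
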
 
 int GrMockGpu::NextExternalTextureID() {