GrResourceCache2 manages scratch textures.

BUG=skia:2889

Review URL: https://codereview.chromium.org/608883003
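
A quick illustration of the new flow (not part of the patch itself): GrContext::lockAndRefScratchTexture() now computes a scratch key, asks GrResourceCache2::findAndRefScratchResource() for a resource held only by the caches (and, during flush, one with no pending IO), and otherwise creates and registers a new scratch texture. Callers just hold a ref and unref when done; GrAutoScratchTexture::detach(), unlockScratchTexture(), and the return-to-cache dance are removed. Below is a minimal caller-side sketch, assuming lockAndRefScratchTexture() remains callable with the signature shown in this patch; the context pointer, size, and kRGBA_8888_GrPixelConfig are placeholders only:

    #include "GrContext.h"

    // Sketch: obtain a scratch texture with the new API and hand ownership to the caller.
    static GrTexture* create_example_scratch(GrContext* context) {
        GrTextureDesc desc;
        desc.fFlags  = kRenderTarget_GrTextureFlagBit;   // flag used elsewhere in this patch
        desc.fWidth  = 256;                              // placeholder size
        desc.fHeight = 256;
        desc.fConfig = kRGBA_8888_GrPixelConfig;         // placeholder config

        // The new third argument marks requests made during a draw-buffer flush; such
        // requests only return scratch textures with no refs and no pending IO.
        GrTexture* texture = context->lockAndRefScratchTexture(
                desc, GrContext::kApprox_ScratchTexMatch, /*calledDuringFlush=*/false);

        // The caller simply owns this ref. Once the last ref is dropped and pending IO
        // completes, GrResourceCache2::findAndRefScratchResource() can hand the texture
        // out again; there is no unlockScratchTexture() step anymore.
        return texture;
    }
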
diff --git a/src/gpu/GrClipMaskCache.h b/src/gpu/GrClipMaskCache.h
index 6b484e8..a2495f1 100644
--- a/src/gpu/GrClipMaskCache.h
+++ b/src/gpu/GrClipMaskCache.h
@@ -204,7 +204,9 @@
 
             fLastClipGenID = clipGenID;
 
-            fLastMask.set(context, desc);
+            // HACK: set the last param to true to indicate that this request occurs at
+            // flush time and therefore requires a scratch texture with no pending IO operations.
+            fLastMask.set(context, desc, GrContext::kApprox_ScratchTexMatch, /*flushing=*/true);
 
             fLastBound = bound;
         }
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index d0f3cc5..1993ad3 100755
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -70,25 +70,6 @@
 
 #define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
 
-GrTexture* GrAutoScratchTexture::detach() {
-    if (NULL == fTexture) {
-        return NULL;
-    }
-    GrTexture* texture = fTexture;
-    fTexture = NULL;
-
-    // This GrAutoScratchTexture has a ref from lockAndRefScratchTexture, which we give up now.
-    // The cache also has a ref which we are lending to the caller of detach(). When the caller
-    // lets go of the ref and the ref count goes to 0 internal_dispose will see this flag is
-    // set and re-ref the texture, thereby restoring the cache's ref.
-    SkASSERT(!texture->unique());
-    texture->texturePriv().setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
-    texture->unref();
-    SkASSERT(texture->getCacheEntry());
-
-    return texture;
-}
-
 // Glorified typedef to avoid including GrDrawState.h in GrContext.h
 class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
 
@@ -459,159 +440,81 @@
     return texture;
 }
 
-static GrTexture* create_scratch_texture(GrGpu* gpu,
-                                         GrResourceCache* resourceCache,
-                                         const GrTextureDesc& desc) {
-    GrTexture* texture = gpu->createTexture(desc, NULL, 0);
-    if (texture) {
-        GrResourceKey key = GrTexturePriv::ComputeScratchKey(texture->desc());
-        // Adding a resource could put us overbudget. Try to free up the
-        // necessary space before adding it.
-        resourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
-        // Make the resource exclusive so future 'find' calls don't return it
-        resourceCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
+bool GrContext::createNewScratchTexture(const GrTextureDesc& desc) {
+    SkAutoTUnref<GrTexture> texture(fGpu->createTexture(desc, NULL, 0));
+    if (!texture) {
+        return false;
     }
-    return texture;
+    fResourceCache->addResource(texture->getScratchKey(), texture);
+    texture->fIsScratch = GrIORef::kYes_IsScratch;
+    return true;
 }
 
-GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
+GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match,
+                                               bool calledDuringFlush) {
 
+    // kNoStencil has no meaning if kRT isn't set.
     SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
              !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
 
-    // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
-    SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
-             !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
-             (inDesc.fConfig != kAlpha_8_GrPixelConfig));
+    // Make sure caller has checked for renderability if kRT is set.
+    SkASSERT(!(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
+             this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));
 
-    if (!fGpu->caps()->reuseScratchTextures() &&
-        !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
-        // If we're never recycling this texture we can always make it the right size
-        return create_scratch_texture(fGpu, fResourceCache, inDesc);
-    }
+    SkTCopyOnFirstWrite<GrTextureDesc> desc(inDesc);
 
-    GrTextureDesc desc = inDesc;
-
-    if (kApprox_ScratchTexMatch == match) {
-        // bin by pow2 with a reasonable min
-        static const int MIN_SIZE = 16;
-        desc.fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc.fWidth));
-        desc.fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc.fHeight));
-    }
-
-    GrGpuResource* resource = NULL;
-    int origWidth = desc.fWidth;
-    int origHeight = desc.fHeight;
-
-    do {
-        GrResourceKey key = GrTexturePriv::ComputeScratchKey(desc);
-        // Ensure we have exclusive access to the texture so future 'find' calls don't return it
-        resource = fResourceCache->find(key, GrResourceCache::kHide_OwnershipFlag);
-        if (resource) {
-            resource->ref();
-            break;
-        }
-        if (kExact_ScratchTexMatch == match) {
-            break;
-        }
-        // We had a cache miss and we are in approx mode, relax the fit of the flags.
-
-        // We no longer try to reuse textures that were previously used as render targets in
-        // situations where no RT is needed; doing otherwise can confuse the video driver and
-        // cause significant performance problems in some cases.
-        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
-            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
-        } else {
-            break;
+    // There is a regression here: when reuseScratchTextures is false, the texture won't be
+    // freed when its ref and IO counts reach zero. TODO: Make GrResourceCache2 free scratch
+    // resources as soon as it is their sole owner and reuseScratchTextures is false.
+    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrTextureFlagBit)) {
+        GrTextureFlags origFlags = desc->fFlags;
+        if (kApprox_ScratchTexMatch == match) {
+            // bin by pow2 with a reasonable min
+            static const int MIN_SIZE = 16;
+            GrTextureDesc* wdesc = desc.writable();
+            wdesc->fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
+            wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
         }
 
-    } while (true);
+        do {
+            GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
+            GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key,
+                                                                                 calledDuringFlush);
+            if (resource) {
+                fResourceCache->makeResourceMRU(resource);
+                return static_cast<GrTexture*>(resource);
+            }
 
-    if (NULL == resource) {
-        desc.fFlags = inDesc.fFlags;
-        desc.fWidth = origWidth;
-        desc.fHeight = origHeight;
-        resource = create_scratch_texture(fGpu, fResourceCache, desc);
+            if (kExact_ScratchTexMatch == match) {
+                break;
+            }
+            // We had a cache miss and we are in approx mode, so relax the fit of the flags.
+
+            // We no longer try to reuse textures that were previously used as render targets in
+            // situations where no RT is needed; doing otherwise can confuse the video driver and
+            // cause significant performance problems in some cases.
+            if (desc->fFlags & kNoStencil_GrTextureFlagBit) {
+                desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrTextureFlagBit;
+            } else {
+                break;
+            }
+
+        } while (true);
+
+        desc.writable()->fFlags = origFlags;
     }
 
+    if (!this->createNewScratchTexture(*desc)) {
+        return NULL;
+    }
+
+    // If we got here then we didn't find a cached texture, but we just added one.
+    GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
+    GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key, calledDuringFlush);
+    SkASSERT(resource);
     return static_cast<GrTexture*>(resource);
 }
 
-void GrContext::addExistingTextureToCache(GrTexture* texture) {
-
-    if (NULL == texture) {
-        return;
-    }
-
-    // This texture should already have a cache entry since it was once
-    // attached
-    SkASSERT(texture->getCacheEntry());
-
-    // Conceptually, the cache entry is going to assume responsibility
-    // for the creation ref. Assert refcnt == 1.
-    // Except that this also gets called when the texture is prematurely
-    // abandoned. In that case the ref count may be > 1.
-    // SkASSERT(texture->unique());
-
-    if (fGpu->caps()->reuseScratchTextures() || texture->asRenderTarget()) {
-        // Since this texture came from an AutoScratchTexture it should
-        // still be in the exclusive pile. Recycle it.
-        fResourceCache->makeNonExclusive(texture->getCacheEntry());
-        this->purgeCache();
-    } else {
-        // When we aren't reusing textures we know this scratch texture
-        // will never be reused and would be just wasting time in the cache
-        fResourceCache->makeNonExclusive(texture->getCacheEntry());
-        fResourceCache->deleteResource(texture->getCacheEntry());
-    }
-}
-
-void GrContext::unlockScratchTexture(GrTexture* texture) {
-    if (texture->wasDestroyed()) {
-        if (texture->getCacheEntry()->key().isScratch()) {
-            // This texture was detached from the cache but the cache still had a ref to it but
-            // not a pointer to it. This will unref the texture and delete its resource cache
-            // entry.
-            delete texture->getCacheEntry();
-        }
-        return;
-    }
-
-    ASSERT_OWNED_RESOURCE(texture);
-    SkASSERT(texture->getCacheEntry());
-
-    // If this is a scratch texture we detached it from the cache
-    // while it was locked (to avoid two callers simultaneously getting
-    // the same texture).
-    if (texture->getCacheEntry()->key().isScratch()) {
-        if (fGpu->caps()->reuseScratchTextures() || texture->asRenderTarget()) {
-            fResourceCache->makeNonExclusive(texture->getCacheEntry());
-            this->purgeCache();
-        } else if (texture->unique()) {
-            // Only the cache now knows about this texture. Since we're never
-            // reusing scratch textures (in this code path) it would just be
-            // wasting time sitting in the cache.
-            fResourceCache->makeNonExclusive(texture->getCacheEntry());
-            fResourceCache->deleteResource(texture->getCacheEntry());
-        } else {
-            // In this case (there is still a non-cache ref) but we don't really
-            // want to readd it to the cache (since it will never be reused).
-            // Instead, give up the cache's ref and leave the decision up to
-            // addExistingTextureToCache once its ref count reaches 0. For
-            // this to work we need to leave it in the exclusive list.
-            texture->texturePriv().setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
-            // Give up the cache's ref to the texture
-            texture->unref();
-        }
-    }
-}
-
-void GrContext::purgeCache() {
-    if (fResourceCache) {
-        fResourceCache->purgeAsNeeded();
-    }
-}
-
 bool GrContext::OverbudgetCB(void* data) {
     SkASSERT(data);
 
diff --git a/src/gpu/GrLayerCache.cpp b/src/gpu/GrLayerCache.cpp
index f90ab55..0481d14 100644
--- a/src/gpu/GrLayerCache.cpp
+++ b/src/gpu/GrLayerCache.cpp
@@ -245,7 +245,6 @@
 #endif
 
     } else {
-        fContext->unlockScratchTexture(layer->texture());
         layer->setTexture(NULL, GrIRect16::MakeEmpty());
     }
 
diff --git a/src/gpu/GrResourceCache.cpp b/src/gpu/GrResourceCache.cpp
index e9be509..12e959f 100644
--- a/src/gpu/GrResourceCache.cpp
+++ b/src/gpu/GrResourceCache.cpp
@@ -82,14 +82,10 @@
 #if GR_CACHE_STATS
     fHighWaterEntryCount          = 0;
     fHighWaterEntryBytes          = 0;
-    fHighWaterClientDetachedCount = 0;
-    fHighWaterClientDetachedBytes = 0;
 #endif
 
     fEntryCount                   = 0;
     fEntryBytes                   = 0;
-    fClientDetachedCount          = 0;
-    fClientDetachedBytes          = 0;
 
     fPurging                      = false;
 
@@ -136,55 +132,26 @@
     }
 }
 
-void GrResourceCache::internalDetach(GrResourceCacheEntry* entry,
-                                     BudgetBehaviors behavior) {
+void GrResourceCache::internalDetach(GrResourceCacheEntry* entry) {
     fList.remove(entry);
-
-    // update our stats
-    if (kIgnore_BudgetBehavior == behavior) {
-        fClientDetachedCount += 1;
-        fClientDetachedBytes += entry->fCachedSize;
-
-#if GR_CACHE_STATS
-        if (fHighWaterClientDetachedCount < fClientDetachedCount) {
-            fHighWaterClientDetachedCount = fClientDetachedCount;
-        }
-        if (fHighWaterClientDetachedBytes < fClientDetachedBytes) {
-            fHighWaterClientDetachedBytes = fClientDetachedBytes;
-        }
-#endif
-
-    } else {
-        SkASSERT(kAccountFor_BudgetBehavior == behavior);
-
-        fEntryCount -= 1;
-        fEntryBytes -= entry->fCachedSize;
-    }
+    fEntryCount -= 1;
+    fEntryBytes -= entry->fCachedSize;
 }
 
-void GrResourceCache::attachToHead(GrResourceCacheEntry* entry,
-                                   BudgetBehaviors behavior) {
+void GrResourceCache::attachToHead(GrResourceCacheEntry* entry) {
     fList.addToHead(entry);
 
-    // update our stats
-    if (kIgnore_BudgetBehavior == behavior) {
-        fClientDetachedCount -= 1;
-        fClientDetachedBytes -= entry->fCachedSize;
-    } else {
-        SkASSERT(kAccountFor_BudgetBehavior == behavior);
-
-        fEntryCount += 1;
-        fEntryBytes += entry->fCachedSize;
+    fEntryCount += 1;
+    fEntryBytes += entry->fCachedSize;
 
 #if GR_CACHE_STATS
-        if (fHighWaterEntryCount < fEntryCount) {
-            fHighWaterEntryCount = fEntryCount;
-        }
-        if (fHighWaterEntryBytes < fEntryBytes) {
-            fHighWaterEntryBytes = fEntryBytes;
-        }
-#endif
+    if (fHighWaterEntryCount < fEntryCount) {
+        fHighWaterEntryCount = fEntryCount;
     }
+    if (fHighWaterEntryBytes < fEntryBytes) {
+        fHighWaterEntryBytes = fEntryBytes;
+    }
+#endif
 }
 
 // This functor just searches for an entry with only a single ref (from
@@ -193,41 +160,40 @@
 class GrTFindUnreffedFunctor {
 public:
     bool operator()(const GrResourceCacheEntry* entry) const {
-        return entry->resource()->unique();
+        return entry->resource()->isPurgable();
     }
 };
 
-GrGpuResource* GrResourceCache::find(const GrResourceKey& key, uint32_t ownershipFlags) {
+
+void GrResourceCache::makeResourceMRU(GrGpuResource* resource) {
+    GrResourceCacheEntry* entry = resource->getCacheEntry();
+    if (entry) {
+        this->internalDetach(entry);
+        this->attachToHead(entry);
+    }
+}
+
+GrGpuResource* GrResourceCache::find(const GrResourceKey& key) {
     GrAutoResourceCacheValidate atcv(this);
 
     GrResourceCacheEntry* entry = NULL;
 
-    if (ownershipFlags & kNoOtherOwners_OwnershipFlag) {
-        GrTFindUnreffedFunctor functor;
-
-        entry = fCache.find<GrTFindUnreffedFunctor>(key, functor);
-    } else {
-        entry = fCache.find(key);
-    }
+    entry = fCache.find(key);
 
     if (NULL == entry) {
         return NULL;
     }
 
-    if (ownershipFlags & kHide_OwnershipFlag) {
-        this->makeExclusive(entry);
-    } else {
-        // Make this resource MRU
-        this->internalDetach(entry);
-        this->attachToHead(entry);
-    }
+    // Make this resource MRU
+    this->internalDetach(entry);
+    this->attachToHead(entry);
 
+    // GrResourceCache2 is responsible for scratch resources.
+    SkASSERT(GrIORef::kNo_IsScratch == entry->resource()->fIsScratch);
     return entry->fResource;
 }
 
-void GrResourceCache::addResource(const GrResourceKey& key,
-                                  GrGpuResource* resource,
-                                  uint32_t ownershipFlags) {
+void GrResourceCache::addResource(const GrResourceKey& key, GrGpuResource* resource) {
     SkASSERT(NULL == resource->getCacheEntry());
     // we don't expect to create new resources during a purge. In theory
     // this could cause purgeAsNeeded() into an infinite loop (e.g.
@@ -241,77 +207,15 @@
 
     this->attachToHead(entry);
     fCache.insert(key, entry);
-
-    if (ownershipFlags & kHide_OwnershipFlag) {
-        this->makeExclusive(entry);
-    }
-
-}
-
-void GrResourceCache::makeExclusive(GrResourceCacheEntry* entry) {
-    GrAutoResourceCacheValidate atcv(this);
-
-    SkASSERT(!entry->fIsExclusive);
-    entry->fIsExclusive = true;
-
-    // When scratch textures are detached (to hide them from future finds) they
-    // still count against the resource budget
-    this->internalDetach(entry, kIgnore_BudgetBehavior);
-    fCache.remove(entry->key(), entry);
-
-#ifdef SK_DEBUG
-    fExclusiveList.addToHead(entry);
-#endif
-}
-
-void GrResourceCache::removeInvalidResource(GrResourceCacheEntry* entry) {
-    // If the resource went invalid while it was detached then purge it
-    // This can happen when a 3D context was lost,
-    // the client called GrContext::abandonContext() to notify Gr,
-    // and then later an SkGpuDevice's destructor releases its backing
-    // texture (which was invalidated at contextDestroyed time).
-    // TODO: Safely delete the GrResourceCacheEntry as well.
-    fClientDetachedCount -= 1;
-    fEntryCount -= 1;
-    fClientDetachedBytes -= entry->fCachedSize;
-    fEntryBytes -= entry->fCachedSize;
-    entry->fCachedSize = 0;
-}
-
-void GrResourceCache::makeNonExclusive(GrResourceCacheEntry* entry) {
-    GrAutoResourceCacheValidate atcv(this);
-
-#ifdef SK_DEBUG
-    fExclusiveList.remove(entry);
-#endif
-
-    if (!entry->resource()->wasDestroyed()) {
-        // Since scratch textures still count against the cache budget even
-        // when they have been removed from the cache, re-adding them doesn't
-        // alter the budget information.
-        attachToHead(entry, kIgnore_BudgetBehavior);
-        fCache.insert(entry->key(), entry);
-
-        SkASSERT(entry->fIsExclusive);
-        entry->fIsExclusive = false;
-    } else {
-        this->removeInvalidResource(entry);
-    }
 }
 
 void GrResourceCache::didIncreaseResourceSize(const GrResourceCacheEntry* entry, size_t amountInc) {
     fEntryBytes += amountInc;
-    if (entry->fIsExclusive) {
-        fClientDetachedBytes += amountInc;
-    }
     this->purgeAsNeeded();
 }
 
 void GrResourceCache::didDecreaseResourceSize(const GrResourceCacheEntry* entry, size_t amountDec) {
     fEntryBytes -= amountDec;
-    if (entry->fIsExclusive) {
-        fClientDetachedBytes -= amountDec;
-    }
 #ifdef SK_DEBUG
     this->validate();
 #endif
@@ -359,13 +263,6 @@
     fInvalidationInbox.poll(&invalidated);
 
     for (int i = 0; i < invalidated.count(); i++) {
-        // We're somewhat missing an opportunity here.  We could use the
-        // default find functor that gives us back resources whether we own
-        // them exclusively or not, and when they're not exclusively owned mark
-        // them for purging later when they do become exclusively owned.
-        //
-        // This is complicated and confusing.  May try this in the future.  For
-        // now, these resources are just LRU'd as if we never got the message.
         while (GrResourceCacheEntry* entry = fCache.find(invalidated[i].key, GrTFindUnreffedFunctor())) {
             this->deleteResource(entry);
         }
@@ -373,7 +270,7 @@
 }
 
 void GrResourceCache::deleteResource(GrResourceCacheEntry* entry) {
-    SkASSERT(entry->fResource->unique());
+    SkASSERT(entry->fResource->isPurgable());
 
     // remove from our cache
     fCache.remove(entry->key(), entry);
@@ -412,7 +309,7 @@
             }
 
             GrResourceCacheEntry* prev = iter.prev();
-            if (entry->fResource->unique()) {
+            if (entry->fResource->isPurgable()) {
                 changed = true;
                 this->deleteResource(entry);
             }
@@ -435,14 +332,7 @@
     this->purgeAsNeeded();
 
 #ifdef SK_DEBUG
-    SkASSERT(fExclusiveList.countEntries() == fClientDetachedCount);
-    SkASSERT(countBytes(fExclusiveList) == fClientDetachedBytes);
     if (!fCache.count()) {
-        // Items may have been detached from the cache (such as the backing
-        // texture for an SkGpuDevice). The above purge would not have removed
-        // them.
-        SkASSERT(fEntryCount == fClientDetachedCount);
-        SkASSERT(fEntryBytes == fClientDetachedBytes);
         SkASSERT(fList.isEmpty());
     }
 #endif
@@ -474,25 +364,14 @@
 
 void GrResourceCache::validate() const {
     fList.validate();
-    fExclusiveList.validate();
     SkASSERT(both_zero_or_nonzero(fEntryCount, fEntryBytes));
-    SkASSERT(both_zero_or_nonzero(fClientDetachedCount, fClientDetachedBytes));
-    SkASSERT(fClientDetachedBytes <= fEntryBytes);
-    SkASSERT(fClientDetachedCount <= fEntryCount);
-    SkASSERT((fEntryCount - fClientDetachedCount) == fCache.count());
+    SkASSERT(fEntryCount == fCache.count());
 
     EntryList::Iter iter;
 
-    // check that the exclusively held entries are okay
-    const GrResourceCacheEntry* entry = iter.init(const_cast<EntryList&>(fExclusiveList),
-                                                  EntryList::Iter::kHead_IterStart);
-
-    for ( ; entry; entry = iter.next()) {
-        entry->validate();
-    }
-
     // check that the shareable entries are okay
-    entry = iter.init(const_cast<EntryList&>(fList), EntryList::Iter::kHead_IterStart);
+    const GrResourceCacheEntry* entry = iter.init(const_cast<EntryList&>(fList),
+                                                  EntryList::Iter::kHead_IterStart);
 
     int count = 0;
     for ( ; entry; entry = iter.next()) {
@@ -500,17 +379,11 @@
         SkASSERT(fCache.find(entry->key()));
         count += 1;
     }
-    SkASSERT(count == fEntryCount - fClientDetachedCount);
+    SkASSERT(count == fEntryCount);
 
-    size_t bytes = countBytes(fList);
-    SkASSERT(bytes == fEntryBytes  - fClientDetachedBytes);
-
-    bytes = countBytes(fExclusiveList);
-    SkASSERT(bytes == fClientDetachedBytes);
-
-    SkASSERT(fList.countEntries() == fEntryCount - fClientDetachedCount);
-
-    SkASSERT(fExclusiveList.countEntries() == fClientDetachedCount);
+    size_t bytes = this->countBytes(fList);
+    SkASSERT(bytes == fEntryBytes);
+    SkASSERT(fList.countEntries() == fEntryCount);
 }
 #endif // SK_DEBUG
 
@@ -534,10 +407,6 @@
                 fEntryCount, locked, fHighWaterEntryCount);
     SkDebugf("\t\tEntry Bytes: current %d high %d\n",
                 fEntryBytes, fHighWaterEntryBytes);
-    SkDebugf("\t\tDetached Entry Count: current %d high %d\n",
-                fClientDetachedCount, fHighWaterClientDetachedCount);
-    SkDebugf("\t\tDetached Bytes: current %d high %d\n",
-                fClientDetachedBytes, fHighWaterClientDetachedBytes);
 }
 
 #endif
diff --git a/src/gpu/GrResourceCache.h b/src/gpu/GrResourceCache.h
index 8333780..880a0a9 100644
--- a/src/gpu/GrResourceCache.h
+++ b/src/gpu/GrResourceCache.h
@@ -141,26 +141,13 @@
      */
     int getCachedResourceCount() const { return fEntryCount; }
 
-    // For a found or added resource to be completely exclusive to the caller
-    // both the kNoOtherOwners and kHide flags need to be specified
-    enum OwnershipFlags {
-        kNoOtherOwners_OwnershipFlag = 0x1, // found/added resource has no other owners
-        kHide_OwnershipFlag = 0x2  // found/added resource is hidden from future 'find's
-    };
-
     /**
      *  Search for an entry with the same Key. If found, return it.
      *  If not found, return null.
-     *  If ownershipFlags includes kNoOtherOwners and a resource is returned
-     *  then that resource has no other refs to it.
-     *  If ownershipFlags includes kHide and a resource is returned then that
-     *  resource will not be returned from future 'find' calls until it is
-     *  'freed' (and recycled) or makeNonExclusive is called.
-     *  For a resource to be completely exclusive to a caller both kNoOtherOwners
-     *  and kHide must be specified.
      */
-    GrGpuResource* find(const GrResourceKey& key,
-                        uint32_t ownershipFlags = 0);
+    GrGpuResource* find(const GrResourceKey& key);
+
+    void makeResourceMRU(GrGpuResource*);
 
     /**
      *  Add the new resource to the cache (by creating a new cache entry based
@@ -168,14 +155,8 @@
      *
      *  Ownership of the resource is transferred to the resource cache,
      *  which will unref() it when it is purged or deleted.
-     *
-     *  If ownershipFlags includes kHide, subsequent calls to 'find' will not
-     *  return 'resource' until it is 'freed' (and recycled) or makeNonExclusive
-     *  is called.
      */
-    void addResource(const GrResourceKey& key,
-                     GrGpuResource* resource,
-                     uint32_t ownershipFlags = 0);
+    void addResource(const GrResourceKey& key, GrGpuResource* resource);
 
     /**
      * Determines if the cache contains an entry matching a key. If a matching
@@ -184,20 +165,6 @@
     bool hasKey(const GrResourceKey& key) const { return SkToBool(fCache.find(key)); }
 
     /**
-     * Hide 'entry' so that future searches will not find it. Such
-     * hidden entries will not be purged. The entry still counts against
-     * the cache's budget and should be made non-exclusive when exclusive access
-     * is no longer needed.
-     */
-    void makeExclusive(GrResourceCacheEntry* entry);
-
-    /**
-     * Restore 'entry' so that it can be found by future searches. 'entry'
-     * will also be purgeable (provided its lock count is now 0.)
-     */
-    void makeNonExclusive(GrResourceCacheEntry* entry);
-
-    /**
      * Notify the cache that the size of a resource has changed.
      */
     void didIncreaseResourceSize(const GrResourceCacheEntry*, size_t amountInc);
@@ -237,15 +204,8 @@
 #endif
 
 private:
-    enum BudgetBehaviors {
-        kAccountFor_BudgetBehavior,
-        kIgnore_BudgetBehavior
-    };
-
-    void internalDetach(GrResourceCacheEntry*, BudgetBehaviors behavior = kAccountFor_BudgetBehavior);
-    void attachToHead(GrResourceCacheEntry*, BudgetBehaviors behavior = kAccountFor_BudgetBehavior);
-
-    void removeInvalidResource(GrResourceCacheEntry* entry);
+    void internalDetach(GrResourceCacheEntry*);
+    void attachToHead(GrResourceCacheEntry*);
 
     SkTMultiMap<GrResourceCacheEntry, GrResourceKey> fCache;
 
@@ -253,11 +213,6 @@
     typedef SkTInternalLList<GrResourceCacheEntry> EntryList;
     EntryList      fList;
 
-#ifdef SK_DEBUG
-    // These objects cannot be returned by a search
-    EntryList      fExclusiveList;
-#endif
-
     // our budget, used in purgeAsNeeded()
     int            fMaxCount;
     size_t         fMaxBytes;
@@ -266,14 +221,10 @@
 #if GR_CACHE_STATS
     int            fHighWaterEntryCount;
     size_t         fHighWaterEntryBytes;
-    int            fHighWaterClientDetachedCount;
-    size_t         fHighWaterClientDetachedBytes;
 #endif
 
     int            fEntryCount;
     size_t         fEntryBytes;
-    int            fClientDetachedCount;
-    size_t         fClientDetachedBytes;
 
     // prevents recursive purging
     bool           fPurging;
diff --git a/src/gpu/GrResourceCache2.cpp b/src/gpu/GrResourceCache2.cpp
index e0ba26a..1ed6669 100644
--- a/src/gpu/GrResourceCache2.cpp
+++ b/src/gpu/GrResourceCache2.cpp
@@ -8,7 +8,8 @@
 
 
 #include "GrResourceCache2.h"
-#include "GrGpuResource.h"
+#include "GrGpuResource.h"  
+#include "SkRefCnt.h"
 
 GrResourceCache2::~GrResourceCache2() {
     this->releaseAll();
@@ -55,3 +56,31 @@
     SkASSERT(!fScratchMap.count());
     SkASSERT(!fCount);
 }
+
+class GrResourceCache2::AvailableForScratchUse {
+public:
+    AvailableForScratchUse(bool calledDuringFlush) : fFlushing(calledDuringFlush) { }
+
+    bool operator()(const GrGpuResource* resource) const {
+        if (fFlushing) {
+            // If this request is coming during draw buffer flush then no refs are allowed
+            // either by drawing code or for pending io operations.
+            // This will be removed when flush no longer creates resources.
+            return resource->reffedOnlyByCache() && !resource->internalHasPendingIO() &&
+                   GrIORef::kYes_IsScratch == resource->fIsScratch;
+        } else {
+            // Because duties are currently shared between GrResourceCache and GrResourceCache2,
+            // this currently means: only GrResourceCache holds a ref, and the resource has been
+            // marked as a scratch resource.
+            return resource->reffedOnlyByCache() && GrIORef::kYes_IsScratch == resource->fIsScratch;
+        }
+    }
+private:
+    bool fFlushing;
+};
+
+GrGpuResource* GrResourceCache2::findAndRefScratchResource(const GrResourceKey& scratchKey,
+                                                           bool calledDuringFlush) {
+    SkASSERT(scratchKey.isScratch());
+    return SkSafeRef(fScratchMap.find(scratchKey, AvailableForScratchUse(calledDuringFlush)));
+}
diff --git a/src/gpu/GrResourceCache2.h b/src/gpu/GrResourceCache2.h
index e05efd7..d48ca0b 100644
--- a/src/gpu/GrResourceCache2.h
+++ b/src/gpu/GrResourceCache2.h
@@ -24,14 +24,17 @@
     GrResourceCache2() : fCount(0) {};
     ~GrResourceCache2();
 
-    void insertResource(GrGpuResource* resource);
+    void insertResource(GrGpuResource*);
 
-    void removeResource(GrGpuResource* resource);
+    void removeResource(GrGpuResource*);
 
     void abandonAll();
 
     void releaseAll();
 
+    GrGpuResource* findAndRefScratchResource(const GrResourceKey& scratchKey,
+                                             bool calledDuringFlush);
+
 private:
 #ifdef SK_DEBUG
     bool isInCache(const GrGpuResource* r) const {
@@ -39,8 +42,8 @@
     }
 #endif
 
+    class AvailableForScratchUse;
 
-    void removeScratch(const GrGpuResource* resource);
     struct ScratchMapTraits {
         static const GrResourceKey& GetKey(const GrGpuResource& r) {
             return r.getScratchKey();
diff --git a/src/gpu/GrTexture.cpp b/src/gpu/GrTexture.cpp
index e1188f9..36ed823 100644
--- a/src/gpu/GrTexture.cpp
+++ b/src/gpu/GrTexture.cpp
@@ -21,27 +21,6 @@
     }
 }
 
-/**
- * This method allows us to interrupt the normal deletion process and place
- * textures back in the texture cache when their ref count goes to zero.
- */
-void GrTexture::internal_dispose() const {
-    if (this->texturePriv().isSetFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit) &&
-        this->INHERITED::getContext()) {
-        GrTexture* nonConstThis = const_cast<GrTexture *>(this);
-        this->ref(); // restore ref count to initial setting
-
-        nonConstThis->texturePriv().resetFlag((GrTextureFlags) kReturnToCache_FlagBit);
-        nonConstThis->INHERITED::getContext()->addExistingTextureToCache(nonConstThis);
-
-        // Note: "this" texture might be freed inside addExistingTextureToCache
-        // if it is purged.
-        return;
-    }
-
-    this->INHERITED::internal_dispose();
-}
-
 void GrTexture::dirtyMipMaps(bool mipMapsDirty) {
     if (mipMapsDirty) {
         if (kValid_MipMapsStatus == fMipMapsStatus) {
@@ -102,27 +81,12 @@
                                 pixelOpsFlags);
 }
 
-void GrTexture::abandonReleaseCommon() {
-    // In debug builds the resource cache tracks removed/exclusive textures and has an unref'ed ptr.
-    // After abandon() or release() the resource cache will be unreachable (getContext() == NULL).
-    // So we readd the texture to the cache here so that it is removed from the exclusive list and
-    // there is no longer an unref'ed ptr to the texture in the cache.
-    if (this->texturePriv().isSetFlag((GrTextureFlags)kReturnToCache_FlagBit)) {
-        SkASSERT(!this->wasDestroyed());
-        this->ref();  // restores the ref the resource cache gave up when it marked this exclusive.
-        this->texturePriv().resetFlag((GrTextureFlags) kReturnToCache_FlagBit);
-        this->getContext()->addExistingTextureToCache(this);
-    }
-}
-
 void GrTexture::onRelease() {
-    this->abandonReleaseCommon();
     SkASSERT(!this->texturePriv().isSetFlag((GrTextureFlags) kReturnToCache_FlagBit));
     INHERITED::onRelease();
 }
 
 void GrTexture::onAbandon() {
-    this->abandonReleaseCommon();
     if (fRenderTarget.get()) {
         fRenderTarget->abandon();
     }
diff --git a/src/gpu/SkGpuDevice.cpp b/src/gpu/SkGpuDevice.cpp
index 6a9c31d..22968a7 100644
--- a/src/gpu/SkGpuDevice.cpp
+++ b/src/gpu/SkGpuDevice.cpp
@@ -77,50 +77,37 @@
 
 ///////////////////////////////////////////////////////////////////////////////
 
-
-class SkGpuDevice::SkAutoCachedTexture : public ::SkNoncopyable {
+// Helper for turning a bitmap into a texture. If the bitmap is GrTexture-backed, this
+// just accesses the backing GrTexture. Otherwise, it creates a cached texture
+// representation and releases it in the destructor.
+class AutoBitmapTexture : public SkNoncopyable {
 public:
-    SkAutoCachedTexture()
-        : fDevice(NULL)
-        , fTexture(NULL) {
-    }
+    AutoBitmapTexture() {}
 
-    SkAutoCachedTexture(SkGpuDevice* device,
-                        const SkBitmap& bitmap,
-                        const GrTextureParams* params,
-                        GrTexture** texture)
-        : fDevice(NULL)
-        , fTexture(NULL) {
+    AutoBitmapTexture(GrContext* context,
+                      const SkBitmap& bitmap,
+                      const GrTextureParams* params,
+                      GrTexture** texture) {
         SkASSERT(texture);
-        *texture = this->set(device, bitmap, params);
+        *texture = this->set(context, bitmap, params);
     }
 
-    ~SkAutoCachedTexture() {
-        if (fTexture) {
-            GrUnlockAndUnrefCachedBitmapTexture(fTexture);
-        }
-    }
-
-    GrTexture* set(SkGpuDevice* device,
+    GrTexture* set(GrContext* context,
                    const SkBitmap& bitmap,
                    const GrTextureParams* params) {
-        if (fTexture) {
-            GrUnlockAndUnrefCachedBitmapTexture(fTexture);
-            fTexture = NULL;
+        // Either get the texture directly from the bitmap, or else use the cache and
+        // remember to unref it.
+        if (GrTexture* bmpTexture = bitmap.getTexture()) {
+            fTexture.reset(NULL);
+            return bmpTexture;
+        } else {
+            fTexture.reset(GrRefCachedBitmapTexture(context, bitmap, params));
+            return fTexture.get();
         }
-        fDevice = device;
-        GrTexture* result = (GrTexture*)bitmap.getTexture();
-        if (NULL == result) {
-            // Cannot return the native texture so look it up in our cache
-            fTexture = GrLockAndRefCachedBitmapTexture(device->context(), bitmap, params);
-            result = fTexture;
-        }
-        return result;
     }
 
 private:
-    SkGpuDevice* fDevice;
-    GrTexture*   fTexture;
+    SkAutoTUnref<GrTexture> fTexture;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -153,8 +140,7 @@
     fRenderTarget = SkRef(surface->asRenderTarget());
 
     SkImageInfo info = surface->surfacePriv().info();
-    SkPixelRef* pr = SkNEW_ARGS(SkGrPixelRef,
-                                (info, surface, SkToBool(flags & kCached_Flag)));
+    SkPixelRef* pr = SkNEW_ARGS(SkGrPixelRef, (info, surface));
     fLegacyBitmap.setInfo(info);
     fLegacyBitmap.setPixelRef(pr)->unref();
 
@@ -1299,7 +1285,7 @@
              bitmap.height() <= fContext->getMaxTextureSize());
 
     GrTexture* texture;
-    SkAutoCachedTexture act(this, bitmap, &params, &texture);
+    AutoBitmapTexture abt(fContext, bitmap, &params, &texture);
     if (NULL == texture) {
         return;
     }
@@ -1394,7 +1380,7 @@
 
     GrTexture* texture;
     // draw sprite uses the default texture params
-    SkAutoCachedTexture act(this, bitmap, NULL, &texture);
+    AutoBitmapTexture abt(fContext, bitmap, NULL, &texture);
 
     SkImageFilter* filter = paint.getImageFilter();
     // This bitmap will own the filtered result as a texture.
@@ -1571,7 +1557,7 @@
     GrTexture* texture;
     // We assume here that the filter will not attempt to tile the src. Otherwise, this cache lookup
     // must be pushed upstack.
-    SkAutoCachedTexture act(this, src, NULL, &texture);
+    AutoBitmapTexture abt(fContext, src, NULL, &texture);
 
     return filter_texture(this, fContext, texture, filter, src.width(), src.height(), ctx,
                           result, offset);
@@ -1802,7 +1788,6 @@
 #if CACHE_COMPATIBLE_DEVICE_TEXTURES
     // layers are never draw in repeat modes, so we can request an approx
     // match and ignore any padding.
-    flags |= kCached_Flag;
     const GrContext::ScratchTexMatch match = (kSaveLayer_Usage == usage) ?
                                                 GrContext::kApprox_ScratchTexMatch :
                                                 GrContext::kExact_ScratchTexMatch;
diff --git a/src/gpu/SkGpuDevice.h b/src/gpu/SkGpuDevice.h
index 285a737..fe03e32 100644
--- a/src/gpu/SkGpuDevice.h
+++ b/src/gpu/SkGpuDevice.h
@@ -34,8 +34,7 @@
 public:
     enum Flags {
         kNeedClear_Flag = 1 << 0,  //!< Surface requires an initial clear
-        kCached_Flag    = 1 << 1,  //!< Surface is cached and needs to be unlocked when released
-        kDFFonts_Flag   = 1 << 2,  //!< Surface should render fonts using signed distance fields
+        kDFFonts_Flag   = 1 << 1,  //!< Surface should render fonts using signed distance fields
     };
 
     /**
@@ -117,9 +116,6 @@
                              const SkImageFilter::Context&,
                              SkBitmap*, SkIPoint*) SK_OVERRIDE;
 
-    class SkAutoCachedTexture; // used internally
-
-
 protected:
     virtual bool onReadPixels(const SkImageInfo&, void*, size_t, int, int) SK_OVERRIDE;
     virtual bool onWritePixels(const SkImageInfo&, const void*, size_t, int, int) SK_OVERRIDE;
diff --git a/src/gpu/SkGr.cpp b/src/gpu/SkGr.cpp
index e81abdb..d943545 100644
--- a/src/gpu/SkGr.cpp
+++ b/src/gpu/SkGr.cpp
@@ -369,9 +369,9 @@
     return ctx->isTextureInCache(desc, cacheID, params);
 }
 
-GrTexture* GrLockAndRefCachedBitmapTexture(GrContext* ctx,
-                                           const SkBitmap& bitmap,
-                                           const GrTextureParams* params) {
+GrTexture* GrRefCachedBitmapTexture(GrContext* ctx,
+                                    const SkBitmap& bitmap,
+                                    const GrTextureParams* params) {
     GrTexture* result = NULL;
 
     bool cache = !bitmap.isVolatile();
@@ -397,13 +397,6 @@
     return result;
 }
 
-void GrUnlockAndUnrefCachedBitmapTexture(GrTexture* texture) {
-    SkASSERT(texture->getContext());
-
-    texture->getContext()->unlockScratchTexture(texture);
-    texture->unref();
-}
-
 ///////////////////////////////////////////////////////////////////////////////
 
 // alphatype is ignore for now, but if GrPixelConfig is expanded to encompass
diff --git a/src/gpu/SkGrPixelRef.cpp b/src/gpu/SkGrPixelRef.cpp
index 489a418..448f2d3 100644
--- a/src/gpu/SkGrPixelRef.cpp
+++ b/src/gpu/SkGrPixelRef.cpp
@@ -99,8 +99,7 @@
 
 ///////////////////////////////////////////////////////////////////////////////
 
-SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface,
-                           bool transferCacheLock) : INHERITED(info) {
+SkGrPixelRef::SkGrPixelRef(const SkImageInfo& info, GrSurface* surface) : INHERITED(info) {
     // For surfaces that are both textures and render targets, the texture owns the
     // render target but not vice versa. So we ref the texture to keep both alive for
     // the lifetime of this pixel ref.
@@ -108,7 +107,6 @@
     if (NULL == fSurface) {
         fSurface = SkSafeRef(surface);
     }
-    fUnlock = transferCacheLock;
 
     if (fSurface) {
         SkASSERT(info.width() <= fSurface->width());
@@ -117,13 +115,6 @@
 }
 
 SkGrPixelRef::~SkGrPixelRef() {
-    if (fUnlock) {
-        GrContext* context = fSurface->getContext();
-        GrTexture* texture = fSurface->asTexture();
-        if (context && texture) {
-            context->unlockScratchTexture(texture);
-        }
-    }
     SkSafeUnref(fSurface);
 }
 
diff --git a/src/gpu/effects/GrTextureStripAtlas.cpp b/src/gpu/effects/GrTextureStripAtlas.cpp
index 91df897..9755ccd 100644
--- a/src/gpu/effects/GrTextureStripAtlas.cpp
+++ b/src/gpu/effects/GrTextureStripAtlas.cpp
@@ -216,7 +216,6 @@
     SkASSERT(fTexture && 0 == fLockedRows);
     fTexture->unref();
     fTexture = NULL;
-    fDesc.fContext->purgeCache();
 }
 
 void GrTextureStripAtlas::initLRU() {