Implement a benchmark for GrResourceCache

Adds "grresourcecache_add" and "grresourcecache_find" bench tests to test
GrResourceCache::add and GrResourceCache::find. The tests work only
with GPU backends, since GrResourceCache needs an GrGpu.

Modifies the bench tests to override a new SkBenchmark::isSuitableFor(Backend)
function that specifies which kind of backend each test is intended for. This
replaces the previous "fIsRendering" flag, which could only indicate that a
test did no rendering.
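
For example, a non-rendering bench now overrides the new hook as sketched
below (this mirrors the pattern applied to each bench in this patch), while
the new GrResourceCache benches instead return true only for kGPU_Backend:

    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
        return backend == kNonRendering_Backend;
    }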

Adds SkCanvas::getGrContext() to retrieve the GrContext that the canvas ends
up drawing to, or NULL when the canvas is not backed by a GPU device. The
member function covers a common use case in the GMs and is also used by the
benchmark added here.
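
A typical call site, mirroring the GM changes in this patch:

    GrContext* context = canvas->getGrContext();
    if (NULL != context) {
        // The canvas draws to a GPU device; use the context.
    }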

R=mtklein@google.com, bsalomon@google.com

Author: kkinnunen@nvidia.com

Review URL: https://codereview.chromium.org/73643005

git-svn-id: http://skia.googlecode.com/svn/trunk@12334 2bbb7eff-a529-9590-31e7-b0007b416f81
diff --git a/bench/ChecksumBench.cpp b/bench/ChecksumBench.cpp
index 3e7a739..d57ede3 100644
--- a/bench/ChecksumBench.cpp
+++ b/bench/ChecksumBench.cpp
@@ -33,7 +33,10 @@
         for (int i = 0; i < U32COUNT; ++i) {
             fData[i] = rand.nextU();
         }
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/DecodeBench.cpp b/bench/DecodeBench.cpp
index 4397eef..cbcc4a6 100644
--- a/bench/DecodeBench.cpp
+++ b/bench/DecodeBench.cpp
@@ -29,7 +29,10 @@
             fname++; // skip the slash
         }
         fName.printf("decode_%s_%s", gConfigName[c], fname);
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/GrMemoryPoolBench.cpp b/bench/GrMemoryPoolBench.cpp
index 0adf92a..b692aae 100644
--- a/bench/GrMemoryPoolBench.cpp
+++ b/bench/GrMemoryPoolBench.cpp
@@ -32,9 +32,10 @@
  */
 class GrMemoryPoolBenchStack : public SkBenchmark {
 public:
-    GrMemoryPoolBenchStack()  {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "grmemorypool_stack";
@@ -83,9 +84,10 @@
  */
 class GrMemoryPoolBenchRandom : public SkBenchmark {
 public:
-    GrMemoryPoolBenchRandom()  {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "grmemorypool_random";
@@ -120,9 +122,10 @@
         M = 4 * (1 << 10),
     };
 public:
-    GrMemoryPoolBenchQueue()  {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "grmemorypool_queue";
diff --git a/bench/GrResourceCacheBench.cpp b/bench/GrResourceCacheBench.cpp
new file mode 100644
index 0000000..ea8297d
--- /dev/null
+++ b/bench/GrResourceCacheBench.cpp
@@ -0,0 +1,242 @@
+
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#if SK_SUPPORT_GPU
+
+#include "GrContext.h"
+#include "GrResource.h"
+#include "GrResourceCache.h"
+#include "GrStencilBuffer.h"
+#include "GrTexture.h"
+#include "SkBenchmark.h"
+#include "SkCanvas.h"
+
+enum {
+    CACHE_SIZE_COUNT = 2048,
+    CACHE_SIZE_BYTES = 2 * 1024 * 1024,
+};
+
+class StencilResource : public GrResource {
+public:
+    SK_DECLARE_INST_COUNT(StencilResource);
+    StencilResource(GrGpu* gpu, int id)
+        : INHERITED(gpu, false),
+          fID(id) {
+    }
+    ~StencilResource() {
+        this->release();
+    }
+
+    virtual size_t sizeInBytes() const SK_OVERRIDE {
+        return 100 + ((fID % 2 == 0) ? -5 : 6);
+    }
+
+    static GrResourceKey ComputeKey(int width, int height, int sampleCnt) {
+        return GrStencilBuffer::ComputeKey(width, height, sampleCnt);
+    }
+
+    int fID;
+
+private:
+    typedef GrResource INHERITED;
+};
+
+class TextureResource : public GrResource {
+public:
+    SK_DECLARE_INST_COUNT(TextureResource);
+    TextureResource(GrGpu* gpu, int id)
+        : INHERITED(gpu, false),
+          fID(id) {
+    }
+    ~TextureResource() {
+        this->release();
+    }
+
+    virtual size_t sizeInBytes() const SK_OVERRIDE {
+        return 100 + ((fID % 2 == 0) ? -40 : 33);
+    }
+
+    static GrResourceKey ComputeKey(const GrTextureDesc& desc) {
+        return GrTexture::ComputeScratchKey(desc);
+    }
+
+    int fID;
+
+private:
+    typedef GrResource INHERITED;
+};
+
+SK_DEFINE_INST_COUNT(StencilResource)
+SK_DEFINE_INST_COUNT(TextureResource)
+
+static void get_stencil(int i, int* w, int* h, int* s) {
+    *w = i % 1024;
+    *h = i * 2 % 1024;
+    *s = i % 2 == 0 ? 0 : 4;
+}
+
+static void get_texture_desc(int i, GrTextureDesc* desc) {
+    desc->fFlags = kRenderTarget_GrTextureFlagBit |
+        kNoStencil_GrTextureFlagBit;
+    desc->fWidth  = i % 1024;
+    desc->fHeight = i * 2 % 1024;
+    desc->fConfig = static_cast<GrPixelConfig>(i % (kLast_GrPixelConfig + 1));
+    desc->fSampleCnt = i % 2 == 0 ? 0 : 4;
+}
+
+static void populate_cache(GrResourceCache* cache, GrGpu* gpu, int resourceCount) {
+    for (int i = 0; i < resourceCount; ++i) {
+        int w, h, s;
+        get_stencil(i, &w, &h, &s);
+        GrResourceKey key = GrStencilBuffer::ComputeKey(w, h, s);
+        GrResource* resource = SkNEW_ARGS(StencilResource, (gpu, i));
+        cache->purgeAsNeeded(1, resource->sizeInBytes());
+        cache->addResource(key, resource);
+        resource->unref();
+    }
+
+    for (int i = 0; i < resourceCount; ++i) {
+        GrTextureDesc desc;
+        get_texture_desc(i, &desc);
+        GrResourceKey key = TextureResource::ComputeKey(desc);
+        GrResource* resource = SkNEW_ARGS(TextureResource, (gpu, i));
+        cache->purgeAsNeeded(1, resource->sizeInBytes());
+        cache->addResource(key, resource);
+        resource->unref();
+    }
+}
+
+static void check_cache_contents_or_die(GrResourceCache* cache, int k) {
+    // Benchmark find calls that succeed.
+    {
+        GrTextureDesc desc;
+        get_texture_desc(k, &desc);
+        GrResourceKey key = TextureResource::ComputeKey(desc);
+        GrResource* item = cache->find(key);
+        if (NULL == item) {
+            GrCrash("cache add does not work as expected");
+            return;
+        }
+        if (static_cast<TextureResource*>(item)->fID != k) {
+            GrCrash("cache add does not work as expected");
+            return;
+        }
+    }
+    {
+        int w, h, s;
+        get_stencil(k, &w, &h, &s);
+        GrResourceKey key = StencilResource::ComputeKey(w, h, s);
+        GrResource* item = cache->find(key);
+        if (NULL == item) {
+            GrCrash("cache add does not work as expected");
+            return;
+        }
+        if (static_cast<StencilResource*>(item)->fID != k) {
+            GrCrash("cache add does not work as expected");
+            return;
+        }
+    }
+
+    // Benchmark also find calls that always fail.
+    {
+        GrTextureDesc desc;
+        get_texture_desc(k, &desc);
+        desc.fHeight |= 1;
+        GrResourceKey key = TextureResource::ComputeKey(desc);
+        GrResource* item = cache->find(key);
+        if (NULL != item) {
+            GrCrash("cache add does not work as expected");
+            return;
+        }
+    }
+    {
+        int w, h, s;
+        get_stencil(k, &w, &h, &s);
+        h |= 1;
+        GrResourceKey key = StencilResource::ComputeKey(w, h, s);
+        GrResource* item = cache->find(key);
+        if (NULL != item) {
+            GrCrash("cache add does not work as expected");
+            return;
+        }
+    }
+}
+
+class GrResourceCacheBenchAdd : public SkBenchmark {
+    enum {
+        RESOURCE_COUNT = CACHE_SIZE_COUNT / 2,
+        DUPLICATE_COUNT = CACHE_SIZE_COUNT / 4,
+    };
+
+public:
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kGPU_Backend;
+    }
+
+protected:
+    virtual const char* onGetName() SK_OVERRIDE {
+        return "grresourcecache_add";
+    }
+
+    virtual void onDraw(SkCanvas* canvas) SK_OVERRIDE {
+        GrGpu* gpu = canvas->getGrContext()->getGpu();
+
+        for (int i = 0; i < this->getLoops(); ++i) {
+            GrResourceCache cache(CACHE_SIZE_COUNT, CACHE_SIZE_BYTES);
+            populate_cache(&cache, gpu, DUPLICATE_COUNT);
+            populate_cache(&cache, gpu, RESOURCE_COUNT);
+
+            // Check that cache works.
+            for (int k = 0; k < RESOURCE_COUNT; k += 33) {
+                check_cache_contents_or_die(&cache, k);
+            }
+            cache.purgeAllUnlocked();
+        }
+    }
+
+private:
+    typedef SkBenchmark INHERITED;
+};
+
+class GrResourceCacheBenchFind : public SkBenchmark {
+    enum {
+        RESOURCE_COUNT = (CACHE_SIZE_COUNT / 2) - 100,
+        DUPLICATE_COUNT = 100
+    };
+
+public:
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kGPU_Backend;
+    }
+
+protected:
+    virtual const char* onGetName() SK_OVERRIDE {
+        return "grresourcecache_find";
+    }
+
+    virtual void onDraw(SkCanvas* canvas) SK_OVERRIDE {
+        GrGpu* gpu = canvas->getGrContext()->getGpu();
+        GrResourceCache cache(CACHE_SIZE_COUNT, CACHE_SIZE_BYTES);
+        populate_cache(&cache, gpu, DUPLICATE_COUNT);
+        populate_cache(&cache, gpu, RESOURCE_COUNT);
+
+        for (int i = 0; i < this->getLoops(); ++i) {
+            for (int k = 0; k < RESOURCE_COUNT; ++k) {
+                check_cache_contents_or_die(&cache, k);
+            }
+        }
+    }
+
+private:
+    typedef SkBenchmark INHERITED;
+};
+
+DEF_BENCH( return new GrResourceCacheBenchAdd(); )
+DEF_BENCH( return new GrResourceCacheBenchFind(); )
+
+#endif
diff --git a/bench/ImageDecodeBench.cpp b/bench/ImageDecodeBench.cpp
index 3a61163..af0569f 100644
--- a/bench/ImageDecodeBench.cpp
+++ b/bench/ImageDecodeBench.cpp
@@ -26,7 +26,10 @@
     , fStream()
     , fValid(false) {
         fName.append(SkOSPath::SkBasename(filename));
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/InterpBench.cpp b/bench/InterpBench.cpp
index f43c37e..9cd99df 100644
--- a/bench/InterpBench.cpp
+++ b/bench/InterpBench.cpp
@@ -20,7 +20,10 @@
         fName.printf("interp_%s", name);
         fFx = 3.3f;
         fDx = 0.1257f;
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
     virtual void performTest(int16_t dst[], float x, float dx, int count) = 0;
diff --git a/bench/MathBench.cpp b/bench/MathBench.cpp
index 6327c3c..8094219 100644
--- a/bench/MathBench.cpp
+++ b/bench/MathBench.cpp
@@ -29,8 +29,10 @@
         for (int i = 0; i < kBuffer; ++i) {
             fSrc[i] = rand.nextSScalar1();
         }
+    }
 
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
     virtual void performTest(float* SK_RESTRICT dst,
@@ -281,7 +283,10 @@
             fProc = gRec[index].fProc;
             fName = gRec[index].fName;
         }
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
@@ -346,7 +351,10 @@
         } else {
             fName = "floor_std";
         }
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
     virtual void process(float) {}
@@ -404,7 +412,10 @@
         } else {
             fName = "clz_intrinsic";
         }
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
     // just so the compiler doesn't remove our loops
@@ -457,7 +468,10 @@
         }
 
         fName = "point_normalize";
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
     // just so the compiler doesn't remove our loops
@@ -501,7 +515,10 @@
             fData[i%N] = rand.nextSScalar1();
         }
 
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
@@ -534,7 +551,10 @@
 public:
     explicit DivModBench(const char* name) {
         fName.printf("divmod_%s", name);
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/Matrix44Bench.cpp b/bench/Matrix44Bench.cpp
index 1316997..7c0c0de 100644
--- a/bench/Matrix44Bench.cpp
+++ b/bench/Matrix44Bench.cpp
@@ -15,7 +15,10 @@
 public:
     Matrix44Bench(const char name[]) {
         fName.printf("matrix44_%s", name);
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
     virtual void performTest() = 0;
diff --git a/bench/MatrixBench.cpp b/bench/MatrixBench.cpp
index 796c7e5..c2d68b5 100644
--- a/bench/MatrixBench.cpp
+++ b/bench/MatrixBench.cpp
@@ -16,7 +16,10 @@
 public:
     MatrixBench(const char name[])  {
         fName.printf("matrix_%s", name);
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
     virtual void performTest() = 0;
diff --git a/bench/MemoryBench.cpp b/bench/MemoryBench.cpp
index 418e149..e3ac2c3 100644
--- a/bench/MemoryBench.cpp
+++ b/bench/MemoryBench.cpp
@@ -19,7 +19,10 @@
     ChunkAllocBench(size_t minSize)  {
         fMinSize = minSize;
         fName.printf("chunkalloc_" SK_SIZE_T_SPECIFIER, minSize);
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
@@ -85,7 +88,10 @@
             fName.appendf("_w");
         }
         fName.appendf("_"SK_SIZE_T_SPECIFIER, num);
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/MemsetBench.cpp b/bench/MemsetBench.cpp
index affae29..17adf04 100644
--- a/bench/MemsetBench.cpp
+++ b/bench/MemsetBench.cpp
@@ -34,7 +34,10 @@
         fMaxSize = maxSize;
         fName.printf("memset%d_" SK_SIZE_T_SPECIFIER "_" SK_SIZE_T_SPECIFIER,
                      type, minSize, maxSize);
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
     virtual void performTest() = 0;
diff --git a/bench/MutexBench.cpp b/bench/MutexBench.cpp
index 1d037ec..bec0c80 100644
--- a/bench/MutexBench.cpp
+++ b/bench/MutexBench.cpp
@@ -9,9 +9,10 @@
 
 class MutexBench : public SkBenchmark {
 public:
-    MutexBench()  {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "mutex";
diff --git a/bench/PathBench.cpp b/bench/PathBench.cpp
index 11151d9..b3d4ab1 100644
--- a/bench/PathBench.cpp
+++ b/bench/PathBench.cpp
@@ -216,8 +216,8 @@
 
 class RandomPathBench : public SkBenchmark {
 public:
-    RandomPathBench()  {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
@@ -761,7 +761,6 @@
     };
 
     ConservativelyContainsBench(Type type)  {
-        fIsRendering = false;
         fParity = false;
         fName = "conservatively_contains_";
         switch (type) {
@@ -780,6 +779,10 @@
         }
     }
 
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 private:
     virtual const char* onGetName() SK_OVERRIDE {
         return fName.c_str();
@@ -903,7 +906,10 @@
         for (int i = 0; i < CONICS; ++i) {
             rand_conic(&fConics[i], rand);
         }
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/PathIterBench.cpp b/bench/PathIterBench.cpp
index a18b29d..aa2042c 100644
--- a/bench/PathIterBench.cpp
+++ b/bench/PathIterBench.cpp
@@ -54,8 +54,10 @@
                     break;
             }
         }
+    }
 
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/PictureRecordBench.cpp b/bench/PictureRecordBench.cpp
index 87e7240..66ec19c 100644
--- a/bench/PictureRecordBench.cpp
+++ b/bench/PictureRecordBench.cpp
@@ -18,7 +18,10 @@
 public:
     PictureRecordBench(const char name[])  {
         fName.printf("picture_record_%s", name);
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
     enum {
diff --git a/bench/RTreeBench.cpp b/bench/RTreeBench.cpp
index 2d86c2d..6991b3f 100644
--- a/bench/RTreeBench.cpp
+++ b/bench/RTreeBench.cpp
@@ -34,8 +34,12 @@
         if (fBulkLoad) {
             fName.append("_bulk");
         }
-        fIsRendering = false;
     }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual ~BBoxBuildBench() {
         fTree->unref();
     }
@@ -84,8 +88,12 @@
         if (fBulkLoad) {
             fName.append("_bulk");
         }
-        fIsRendering = false;
     }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
     virtual ~BBoxQueryBench() {
         fTree->unref();
     }
diff --git a/bench/RefCntBench.cpp b/bench/RefCntBench.cpp
index 0110dbc..f6ec733 100644
--- a/bench/RefCntBench.cpp
+++ b/bench/RefCntBench.cpp
@@ -16,9 +16,10 @@
 
 class RefCntBench_Stack : public SkBenchmark {
 public:
-    RefCntBench_Stack() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_stack";
@@ -53,9 +54,10 @@
 
 class RefCntBench_Heap : public SkBenchmark {
 public:
-    RefCntBench_Heap() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_heap";
@@ -79,9 +81,10 @@
 
 class RefCntBench_New : public SkBenchmark {
 public:
-    RefCntBench_New() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_new";
@@ -106,9 +109,10 @@
 
 class WeakRefCntBench_Stack : public SkBenchmark {
 public:
-    WeakRefCntBench_Stack() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_stack_weak";
@@ -136,9 +140,10 @@
 
 class WeakRefCntBench_Heap : public SkBenchmark {
 public:
-    WeakRefCntBench_Heap() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_heap_weak";
@@ -162,9 +167,10 @@
 
 class WeakRefCntBench_New : public SkBenchmark {
 public:
-    WeakRefCntBench_New() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
+
 protected:
     virtual const char* onGetName() {
         return "ref_cnt_new_weak";
diff --git a/bench/RegionBench.cpp b/bench/RegionBench.cpp
index e383113..fb6e94f 100644
--- a/bench/RegionBench.cpp
+++ b/bench/RegionBench.cpp
@@ -97,7 +97,10 @@
             fA.op(randrect(rand), SkRegion::kXOR_Op);
             fB.op(randrect(rand), SkRegion::kXOR_Op);
         }
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/RegionContainBench.cpp b/bench/RegionContainBench.cpp
index a799895..a34706d 100644
--- a/bench/RegionContainBench.cpp
+++ b/bench/RegionContainBench.cpp
@@ -43,8 +43,10 @@
         }
 
         fB.setRect(0, 0, H, W);
+    }
 
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/ScalarBench.cpp b/bench/ScalarBench.cpp
index 536af28..a1ea737 100644
--- a/bench/ScalarBench.cpp
+++ b/bench/ScalarBench.cpp
@@ -16,7 +16,10 @@
 public:
     ScalarBench(const char name[])  {
         fName.printf("scalar_%s", name);
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
     virtual void performTest() = 0;
@@ -143,7 +146,10 @@
             fPts[i].fX = rand.nextSScalar1();
             fPts[i].fY = rand.nextSScalar1();
         }
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/SkBenchmark.cpp b/bench/SkBenchmark.cpp
index 432d5be..26a7a3b 100644
--- a/bench/SkBenchmark.cpp
+++ b/bench/SkBenchmark.cpp
@@ -21,7 +21,6 @@
     fForceAA = true;
     fForceFilter = false;
     fDither = SkTriState::kDefault;
-    fIsRendering = true;
     fOrMask = fClearMask = 0;
     fLoops = 1;
 }
diff --git a/bench/SkBenchmark.h b/bench/SkBenchmark.h
index ec40077..77e2357 100644
--- a/bench/SkBenchmark.h
+++ b/bench/SkBenchmark.h
@@ -49,6 +49,19 @@
     const char* getName();
     SkIPoint getSize();
 
+    enum Backend {
+        kNonRendering_Backend,
+        kRaster_Backend,
+        kGPU_Backend,
+        kPDF_Backend,
+    };
+
+    // Call to determine whether the benchmark is intended for
+    // the given backend.
+    virtual bool isSuitableFor(Backend backend) {
+        return backend != kNonRendering_Backend;
+    }
+
     // Call before draw, allows the benchmark to do setup work outside of the
     // timer. When a benchmark is repeatedly drawn, this should be called once
     // before the initial draw.
@@ -77,13 +90,6 @@
         fDither = state;
     }
 
-    /** If true; the benchmark does rendering; if false, the benchmark
-        doesn't, and so need not be re-run in every different rendering
-        mode. */
-    bool isRendering() {
-        return fIsRendering;
-    }
-
     /** Assign masks for paint-flags. These will be applied when setupPaint()
      *  is called.
      *
@@ -120,8 +126,6 @@
     virtual void onPostDraw() {}
 
     virtual SkIPoint onGetSize();
-    /// Defaults to true.
-    bool    fIsRendering;
 
 private:
     int     fForceAlpha;
diff --git a/bench/SkipZeroesBench.cpp b/bench/SkipZeroesBench.cpp
index fc2f060..b0e3630 100644
--- a/bench/SkipZeroesBench.cpp
+++ b/bench/SkipZeroesBench.cpp
@@ -33,7 +33,10 @@
         } else {
             fName.append("_write_zeroes");
         }
-        fIsRendering = false;
+    }
+
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/SortBench.cpp b/bench/SortBench.cpp
index 6fb3a71..161e979 100644
--- a/bench/SortBench.cpp
+++ b/bench/SortBench.cpp
@@ -104,10 +104,13 @@
 
 public:
     SortBench(Type t, SortType s) : fType(t), fSortProc(gSorts[s].fProc) {
-        fIsRendering = false;
         fName.printf("sort_%s_%s", gSorts[s].fName, gRec[t].fName);
     }
 
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
+
 protected:
     virtual const char* onGetName() SK_OVERRIDE {
         return fName.c_str();
diff --git a/bench/WriterBench.cpp b/bench/WriterBench.cpp
index 0956b22..f9a0ac8 100644
--- a/bench/WriterBench.cpp
+++ b/bench/WriterBench.cpp
@@ -12,7 +12,9 @@
 
 class WriterBench : public SkBenchmark {
 public:
-    WriterBench() { fIsRendering = false; }
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
+    }
 
 protected:
     virtual const char* onGetName() SK_OVERRIDE {
diff --git a/bench/XfermodeBench.cpp b/bench/XfermodeBench.cpp
index 0c79cb3..41b00e6 100644
--- a/bench/XfermodeBench.cpp
+++ b/bench/XfermodeBench.cpp
@@ -63,8 +63,8 @@
 
 class XferCreateBench : public SkBenchmark {
 public:
-    XferCreateBench() {
-        fIsRendering = false;
+    virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+        return backend == kNonRendering_Backend;
     }
 
 protected:
diff --git a/bench/benchmain.cpp b/bench/benchmain.cpp
index 83d3dee..de38cc9 100644
--- a/bench/benchmain.cpp
+++ b/bench/benchmain.cpp
@@ -157,27 +157,20 @@
     canvas->translate(-x, -y);
 }
 
-enum Backend {
-    kNonRendering_Backend,
-    kRaster_Backend,
-    kGPU_Backend,
-    kPDF_Backend,
-};
-
 static SkBaseDevice* make_device(SkBitmap::Config config, const SkIPoint& size,
-                                 Backend backend, int sampleCount, GrContext* context) {
+                                 SkBenchmark::Backend backend, int sampleCount, GrContext* context) {
     SkBaseDevice* device = NULL;
     SkBitmap bitmap;
     bitmap.setConfig(config, size.fX, size.fY);
 
     switch (backend) {
-        case kRaster_Backend:
+        case SkBenchmark::kRaster_Backend:
             bitmap.allocPixels();
             erase(bitmap);
             device = SkNEW_ARGS(SkBitmapDevice, (bitmap));
             break;
 #if SK_SUPPORT_GPU
-        case kGPU_Backend: {
+        case SkBenchmark::kGPU_Backend: {
             GrTextureDesc desc;
             desc.fConfig = kSkia8888_GrPixelConfig;
             desc.fFlags = kRenderTarget_GrTextureFlagBit;
@@ -192,7 +185,7 @@
             break;
         }
 #endif
-        case kPDF_Backend:
+        case SkBenchmark::kPDF_Backend:
         default:
             SkDEBUGFAIL("unsupported");
     }
@@ -223,22 +216,22 @@
     SkBitmap::Config    config;
     const char*         name;
     int                 sampleCount;
-    Backend             backend;
+    SkBenchmark::Backend backend;
     GLContextType       contextType;
     bool                runByDefault;
 } gConfigs[] = {
-    { SkBitmap::kNo_Config,        "NONRENDERING", 0, kNonRendering_Backend, kNative, true},
-    { SkBitmap::kARGB_8888_Config, "8888",         0, kRaster_Backend,       kNative, true},
-    { SkBitmap::kRGB_565_Config,   "565",          0, kRaster_Backend,       kNative, true},
+    { SkBitmap::kNo_Config,        "NONRENDERING", 0, SkBenchmark::kNonRendering_Backend, kNative, true},
+    { SkBitmap::kARGB_8888_Config, "8888",         0, SkBenchmark::kRaster_Backend,       kNative, true},
+    { SkBitmap::kRGB_565_Config,   "565",          0, SkBenchmark::kRaster_Backend,       kNative, true},
 #if SK_SUPPORT_GPU
-    { SkBitmap::kARGB_8888_Config, "GPU",          0, kGPU_Backend,          kNative, true},
-    { SkBitmap::kARGB_8888_Config, "MSAA4",        4, kGPU_Backend,          kNative, false},
-    { SkBitmap::kARGB_8888_Config, "MSAA16",      16, kGPU_Backend,          kNative, false},
+    { SkBitmap::kARGB_8888_Config, "GPU",          0, SkBenchmark::kGPU_Backend,          kNative, true},
+    { SkBitmap::kARGB_8888_Config, "MSAA4",        4, SkBenchmark::kGPU_Backend,          kNative, false},
+    { SkBitmap::kARGB_8888_Config, "MSAA16",      16, SkBenchmark::kGPU_Backend,          kNative, false},
 #if SK_ANGLE
-    { SkBitmap::kARGB_8888_Config, "ANGLE",        0, kGPU_Backend,          kANGLE,  true},
+    { SkBitmap::kARGB_8888_Config, "ANGLE",        0, SkBenchmark::kGPU_Backend,          kANGLE,  true},
 #endif // SK_ANGLE
-    { SkBitmap::kARGB_8888_Config, "Debug",        0, kGPU_Backend,          kDebug,  kIsDebug},
-    { SkBitmap::kARGB_8888_Config, "NULLGPU",      0, kGPU_Backend,          kNull,   true},
+    { SkBitmap::kARGB_8888_Config, "Debug",        0, SkBenchmark::kGPU_Backend,          kDebug,  kIsDebug},
+    { SkBitmap::kARGB_8888_Config, "NULLGPU",      0, SkBenchmark::kGPU_Backend,          kNull,   true},
 #endif // SK_SUPPORT_GPU
 };
 
@@ -349,7 +342,7 @@
         // Non-rendering configs only run in normal mode
         for (int i = 0; i < configs.count(); ++i) {
             const Config& config = gConfigs[configs[i]];
-            if (kNonRendering_Backend == config.backend) {
+            if (SkBenchmark::kNonRendering_Backend == config.backend) {
                 configs.remove(i, 1);
                 --i;
             }
@@ -364,7 +357,7 @@
     for (int i = 0; i < configs.count(); ++i) {
         const Config& config = gConfigs[configs[i]];
 
-        if (kGPU_Backend == config.backend) {
+        if (SkBenchmark::kGPU_Backend == config.backend) {
             GrContext* context = gContextFactory.get(config.contextType);
             if (NULL == context) {
                 logger.logError(SkStringPrintf(
@@ -426,7 +419,7 @@
     for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
 #if SK_SUPPORT_GPU
         const Config& config = gConfigs[i];
-        if (kGPU_Backend != config.backend) {
+        if (SkBenchmark::kGPU_Backend != config.backend) {
             continue;
         }
         GrContext* context = gContextFactory.get(config.contextType);
@@ -479,14 +472,14 @@
             const int configIndex = configs[i];
             const Config& config = gConfigs[configIndex];
 
-            if ((kNonRendering_Backend == config.backend) == bench->isRendering()) {
+            if (!bench->isSuitableFor(config.backend)) {
                 continue;
             }
 
             GrContext* context = NULL;
 #if SK_SUPPORT_GPU
             SkGLContextHelper* glContext = NULL;
-            if (kGPU_Backend == config.backend) {
+            if (SkBenchmark::kGPU_Backend == config.backend) {
                 context = gContextFactory.get(config.contextType);
                 if (NULL == context) {
                     continue;
@@ -502,7 +495,7 @@
             const SkPicture::RecordingFlags kRecordFlags =
                 SkPicture::kUsePathBoundsForClip_RecordingFlag;
 
-            if (kNonRendering_Backend != config.backend) {
+            if (SkBenchmark::kNonRendering_Backend != config.backend) {
                 device.reset(make_device(config.config,
                                          dim,
                                          config.backend,
@@ -552,7 +545,7 @@
 
 #if SK_SUPPORT_GPU
             SkGLContextHelper* contextHelper = NULL;
-            if (kGPU_Backend == config.backend) {
+            if (SkBenchmark::kGPU_Backend == config.backend) {
                 contextHelper = gContextFactory.getGLContext(config.contextType);
             }
             BenchTimer timer(contextHelper);
@@ -664,7 +657,7 @@
             } while (!kIsDebug && !converged);
             if (FLAGS_verbose) { SkDebugf("\n"); }
 
-            if (FLAGS_outDir.count() && kNonRendering_Backend != config.backend) {
+            if (FLAGS_outDir.count() && SkBenchmark::kNonRendering_Backend != config.backend) {
                 saveFile(bench->getName(),
                          config.name,
                          FLAGS_outDir[0],
diff --git a/gm/bleed.cpp b/gm/bleed.cpp
index 7b2f9f8..8b7d2e2 100644
--- a/gm/bleed.cpp
+++ b/gm/bleed.cpp
@@ -201,7 +201,7 @@
         this->drawCase4(canvas, kCol2X, kRow3Y, SkCanvas::kNone_DrawBitmapRectFlag, SkPaint::kHigh_FilterLevel);
 
 #if SK_SUPPORT_GPU
-        GrContext* ctx = GM::GetGr(canvas);
+        GrContext* ctx = canvas->getGrContext();
         int oldMaxTextureSize = 0;
         if (NULL != ctx) {
             // shrink the max texture size so all our textures can be reasonably sized
diff --git a/gm/gm.cpp b/gm/gm.cpp
index 9da1a17..29d02d1 100644
--- a/gm/gm.cpp
+++ b/gm/gm.cpp
@@ -63,18 +63,5 @@
     canvas->drawRect(r, paint);
 }
 
-#if SK_SUPPORT_GPU
-// canvas could almost be a const&, but accessRenderTarget isn't const.
-/*static*/ GrContext* GM::GetGr(SkCanvas* canvas) {
-    SkASSERT(NULL != canvas);
-    SkBaseDevice* device = canvas->getTopDevice();
-    GrRenderTarget* renderTarget = device->accessRenderTarget();
-    if (NULL != renderTarget) {
-        return renderTarget->getContext();
-    }
-    return NULL;
-}
-#endif
-
 // need to explicitly declare this, or we get some weird infinite loop llist
 template GMRegistry* SkTRegistry<GM*(*)(void*)>::gHead;
diff --git a/gm/gm.h b/gm/gm.h
index a0ad3cf..e69cfc0 100644
--- a/gm/gm.h
+++ b/gm/gm.h
@@ -99,10 +99,6 @@
             fCanvasIsDeferred = isDeferred;
         }
 
-#if SK_SUPPORT_GPU
-        static GrContext* GetGr(/*very nearly const*/ SkCanvas*);
-#endif
-
     const SkMatrix& getStarterMatrix() { return fStarterMatrix; }
     void setStarterMatrix(const SkMatrix& matrix) {
         fStarterMatrix = matrix;
diff --git a/gm/image.cpp b/gm/image.cpp
index 7ecb604..93e16b7 100644
--- a/gm/image.cpp
+++ b/gm/image.cpp
@@ -186,7 +186,7 @@
         SkAutoTUnref<SkSurface> surf2(SkSurface::NewPicture(info.fWidth, info.fHeight));
         SkAutoTUnref<SkSurface> surf3(SkSurface::NewPicture(info.fWidth, info.fHeight));
 #if SK_SUPPORT_GPU
-        GrContext* ctx = GM::GetGr(canvas);
+        GrContext* ctx = canvas->getGrContext();
 
         SkAutoTUnref<SkSurface> surf4(SkSurface::NewRenderTarget(ctx, info, 0));
 #endif
diff --git a/gm/texdata.cpp b/gm/texdata.cpp
index d2e5d05..a87684e 100644
--- a/gm/texdata.cpp
+++ b/gm/texdata.cpp
@@ -40,7 +40,7 @@
     virtual void onDraw(SkCanvas* canvas) {
         SkBaseDevice* device = canvas->getTopDevice();
         GrRenderTarget* target = device->accessRenderTarget();
-        GrContext* ctx = GM::GetGr(canvas);
+        GrContext* ctx = canvas->getGrContext();
         if (ctx && target) {
             SkAutoTArray<SkPMColor> gTextureData((2 * S) * (2 * S));
             static const int stride = 2 * S;
diff --git a/gyp/bench.gypi b/gyp/bench.gypi
index 0274176..33f0dba 100644
--- a/gyp/bench.gypi
+++ b/gyp/bench.gypi
@@ -31,6 +31,7 @@
     '../bench/GameBench.cpp',
     '../bench/GradientBench.cpp',
     '../bench/GrMemoryPoolBench.cpp',
+    '../bench/GrResourceCacheBench.cpp',
     '../bench/ImageCacheBench.cpp',
     '../bench/ImageDecodeBench.cpp',
     '../bench/InterpBench.cpp',
diff --git a/include/core/SkCanvas.h b/include/core/SkCanvas.h
index d831a5c..7bcc09a 100644
--- a/include/core/SkCanvas.h
+++ b/include/core/SkCanvas.h
@@ -29,6 +29,7 @@
 class SkPicture;
 class SkRRect;
 class SkSurface_Base;
+class GrContext;
 
 /** \class SkCanvas
 
@@ -109,6 +110,12 @@
                                          int width, int height,
                                          bool isOpaque);
 
+    /**
+     * Return the GPU context of the device that is associated with the canvas.
+     * For a canvas with a non-GPU device, NULL is returned.
+     */
+    GrContext* getGrContext();
+
     ///////////////////////////////////////////////////////////////////////////
 
     /**
diff --git a/src/core/SkCanvas.cpp b/src/core/SkCanvas.cpp
index 0d5fccb..4eaea11 100644
--- a/src/core/SkCanvas.cpp
+++ b/src/core/SkCanvas.cpp
@@ -26,6 +26,10 @@
 #include "SkTLazy.h"
 #include "SkUtils.h"
 
+#if SK_SUPPORT_GPU
+#include "GrRenderTarget.h"
+#endif
+
 SK_DEFINE_INST_COUNT(SkBounder)
 SK_DEFINE_INST_COUNT(SkCanvas)
 SK_DEFINE_INST_COUNT(SkDrawFilter)
@@ -1570,6 +1574,20 @@
     }
 }
 
+GrContext* SkCanvas::getGrContext() {
+#if SK_SUPPORT_GPU
+    SkBaseDevice* device = this->getTopDevice();
+    if (NULL != device) {
+        GrRenderTarget* renderTarget = device->accessRenderTarget();
+        if (NULL != renderTarget) {
+            return renderTarget->getContext();
+        }
+    }
+#endif
+
+    return NULL;
+
+}
 
 //////////////////////////////////////////////////////////////////////////////
 //  These are the virtual drawing methods