Split GrResource into GrCacheable/GrGpuObject

Before this change, an object needed to inherit from GrResource (and
thus be a GPU object) in order to live in the GrResourceCache. That
was a problem for caching items that weren't GPU objects themselves
but owned GPU objects.

This change splits GrResource into two classes:

  1. GrCacheable: The base class for objects that can live in the
     GrResourceCache.

  2. GrGpuObject, which inherits from GrCacheable: The base class for
     objects that get tracked by GrGpu.

This change is purely a refactor; there is no change in functionality.
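
As a rough sketch of the resulting hierarchy (gpuMemorySize() appears
in the diff below; the other members here are illustrative, not the
exact Skia API):

    #include <cstddef>

    class GrCacheable {
    public:
        virtual ~GrCacheable() {}

        // The cache charges each entry by the GPU memory it accounts
        // for, whether the entry is a GPU object itself or merely
        // owns GPU objects.
        virtual size_t gpuMemorySize() const = 0;
    };

    class GrGpuObject : public GrCacheable {
    public:
        // GPU objects are additionally tracked by GrGpu so they can
        // be released or abandoned when the context goes away.
        virtual void release() = 0;
        virtual void abandon() = 0;
    };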

Change-Id: I3e8daeb1f123041f414aa306c1366e959ae9e39e

BUG=skia:
R=bsalomon@google.com

Author: cdalton@nvidia.com

Review URL: https://codereview.chromium.org/251013002

git-svn-id: http://skia.googlecode.com/svn/trunk@14553 2bbb7eff-a529-9590-31e7-b0007b416f81
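
The diff below applies one mechanical rename at each GrBufferAllocPool
call site: the buffer size query sizeInBytes() becomes gpuMemorySize(),
which now lives on GrCacheable. Building on the sketch above, a
hypothetical cache entry that owns GPU objects without being one shows
why the query belongs on the base class (OwningEntry and fOwned are
illustrative names, not Skia API):

    #include <cstddef>
    #include <vector>

    // Not a GPU object itself (so not a GrGpuObject), but under the
    // new split it can live in GrResourceCache and report the
    // combined footprint of the GPU objects it owns.
    class OwningEntry : public GrCacheable {
    public:
        virtual size_t gpuMemorySize() const {
            size_t total = 0;
            for (size_t i = 0; i < fOwned.size(); ++i) {
                total += fOwned[i]->gpuMemorySize();
            }
            return total;
        }

    private:
        std::vector<GrGpuObject*> fOwned;
    };
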
diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp
index 2dbf3eb..2f18e15 100644
--- a/src/gpu/GrBufferAllocPool.cpp
+++ b/src/gpu/GrBufferAllocPool.cpp
@@ -109,7 +109,7 @@
         if (block.fBuffer->isLocked()) {
             block.fBuffer->unlock();
         } else {
-            size_t flushSize = block.fBuffer->sizeInBytes() - block.fBytesFree;
+            size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
             flushCpuData(fBlocks.back().fBuffer, flushSize);
         }
         fBufferPtr = NULL;
@@ -135,7 +135,7 @@
         SkASSERT(!fBlocks[i].fBuffer->isLocked());
     }
     for (int i = 0; i < fBlocks.count(); ++i) {
-        size_t bytes = fBlocks[i].fBuffer->sizeInBytes() - fBlocks[i].fBytesFree;
+        size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
         bytesInUse += bytes;
         SkASSERT(bytes || unusedBlockAllowed);
     }
@@ -161,7 +161,7 @@
 
     if (NULL != fBufferPtr) {
         BufferBlock& back = fBlocks.back();
-        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
+        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
         size_t pad = GrSizeAlignUpPad(usedBytes,
                                       alignment);
         if ((size + pad) <= back.fBytesFree) {
@@ -201,7 +201,7 @@
     VALIDATE();
     if (NULL != fBufferPtr) {
         const BufferBlock& back = fBlocks.back();
-        size_t usedBytes = back.fBuffer->sizeInBytes() - back.fBytesFree;
+        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
         size_t pad = GrSizeAlignUpPad(usedBytes, itemSize);
         return static_cast<int>((back.fBytesFree - pad) / itemSize);
     } else if (fPreallocBuffersInUse < fPreallocBuffers.count()) {
@@ -231,7 +231,7 @@
         // caller shouldnt try to put back more than they've taken
         SkASSERT(!fBlocks.empty());
         BufferBlock& block = fBlocks.back();
-        size_t bytesUsed = block.fBuffer->sizeInBytes() - block.fBytesFree;
+        size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
         if (bytes >= bytesUsed) {
             bytes -= bytesUsed;
             fBytesInUse -= bytesUsed;
@@ -290,7 +290,7 @@
             prev.fBuffer->unlock();
         } else {
             flushCpuData(prev.fBuffer,
-                         prev.fBuffer->sizeInBytes() - prev.fBytesFree);
+                         prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
         }
         fBufferPtr = NULL;
     }
@@ -348,7 +348,7 @@
     SkASSERT(NULL != buffer);
     SkASSERT(!buffer->isLocked());
     SkASSERT(fCpuData.get() == fBufferPtr);
-    SkASSERT(flushSize <= buffer->sizeInBytes());
+    SkASSERT(flushSize <= buffer->gpuMemorySize());
     VALIDATE(true);
 
     if (fGpu->caps()->bufferLockSupport() &&