Move control of explicit GPU resource allocation to GrContextOptions

Change-Id: Ic284acc79bab5936f0007d5ae5fb1e7a9929e2af
Reviewed-on: https://skia-review.googlesource.com/104880
Commit-Queue: Robert Phillips <robertphillips@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
diff --git a/bench/TopoSortBench.cpp b/bench/TopoSortBench.cpp
index 8a8ec66..123ff74 100644
--- a/bench/TopoSortBench.cpp
+++ b/bench/TopoSortBench.cpp
@@ -17,7 +17,6 @@
     TopoSortBench() { }
 
     ~TopoSortBench() override {
-        sk_tool_utils::TopoTestNode::DeallocNodes(&fGraph);
     }
 
     bool isSuitableFor(Backend backend) override {
@@ -39,7 +38,7 @@
             for (int j = 0; j < numEdges; ++j) {
                 int dep = fRand.nextU() % i;
 
-                fGraph[i]->dependsOn(fGraph[dep]);
+                fGraph[i]->dependsOn(fGraph[dep].get());
             }
         }
     }
@@ -67,7 +66,7 @@
     static const int kNumElements = 1000;
     static const int kMaxEdges = 5;
 
-    SkTDArray<sk_tool_utils::TopoTestNode*> fGraph;
+    SkTArray<sk_sp<sk_tool_utils::TopoTestNode>> fGraph;
     SkRandom fRand;
 
     typedef Benchmark INHERITED;
diff --git a/gn/flutter_defines.gni b/gn/flutter_defines.gni
index 0907b10..83e0a11 100644
--- a/gn/flutter_defines.gni
+++ b/gn/flutter_defines.gni
@@ -4,7 +4,5 @@
 # found in the LICENSE file.
 flutter_defines = [
   "SK_SUPPORT_LEGACY_IMAGE_ENCODE_API",
-  "SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION",
-  "SK_DISABLE_RENDER_TARGET_SORTING",
   "SK_SUPPORT_LEGACY_RECTMAKELARGEST",
 ]
diff --git a/include/gpu/GrContextOptions.h b/include/gpu/GrContextOptions.h
index 492a2d9..0cf4553 100644
--- a/include/gpu/GrContextOptions.h
+++ b/include/gpu/GrContextOptions.h
@@ -141,6 +141,19 @@
     Enable fUseDrawInsteadOfGLClear = Enable::kDefault;
 
     /**
+     * Allow Ganesh to explicitly allocate resources at flush time rather than incrementally while
+     * drawing. This will eventually just be the way it is but, for now, it is optional.
+     */
+    bool fExplicitlyAllocateGPUResources = false;
+
+    /**
+     * Allow Ganesh to sort the opLists prior to allocating resources. This is an optional
+     * behavior that is only relevant when 'fExplicitlyAllocateGPUResources' is enabled.
+     * Eventually this will just be what is done and will not be optional.
+     */
+    bool fSortRenderTargets = false;
+
+    /**
      * Disables correctness workarounds that are enabled for particular GPUs, OSes, or drivers.
      * This does not affect code path choices that are made for perfomance reasons nor does it
      * override other GrContextOption settings.
diff --git a/include/private/GrOpList.h b/include/private/GrOpList.h
index c9abd6d..f183081 100644
--- a/include/private/GrOpList.h
+++ b/include/private/GrOpList.h
@@ -14,17 +14,6 @@
 #include "SkRefCnt.h"
 #include "SkTDArray.h"
 
-
-// Turn on/off the explicit distribution of GPU resources at flush time
-#ifndef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
-   #define SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
-#endif
-
-// Turn on/off the sorting of opLists at flush time
-#ifndef SK_DISABLE_RENDER_TARGET_SORTING
-   #define SK_DISABLE_RENDER_TARGET_SORTING
-#endif
-
 class GrAuditTrail;
 class GrCaps;
 class GrOpFlushState;
@@ -111,7 +100,7 @@
     void setStencilLoadOp(GrLoadOp loadOp) { fStencilLoadOp = loadOp; }
 
 protected:
-    SkDEBUGCODE(bool isInstantiated() const;)
+    bool isInstantiated() const;
 
     GrSurfaceProxyRef fTarget;
     GrAuditTrail*     fAuditTrail;
@@ -154,26 +143,26 @@
     }
 
     struct TopoSortTraits {
-        static void Output(GrOpList* dt, int /* index */) {
-            dt->setFlag(GrOpList::kWasOutput_Flag);
+        static void Output(GrOpList* opList, int /* index */) {
+            opList->setFlag(GrOpList::kWasOutput_Flag);
         }
-        static bool WasOutput(const GrOpList* dt) {
-            return dt->isSetFlag(GrOpList::kWasOutput_Flag);
+        static bool WasOutput(const GrOpList* opList) {
+            return opList->isSetFlag(GrOpList::kWasOutput_Flag);
         }
-        static void SetTempMark(GrOpList* dt) {
-            dt->setFlag(GrOpList::kTempMark_Flag);
+        static void SetTempMark(GrOpList* opList) {
+            opList->setFlag(GrOpList::kTempMark_Flag);
         }
-        static void ResetTempMark(GrOpList* dt) {
-            dt->resetFlag(GrOpList::kTempMark_Flag);
+        static void ResetTempMark(GrOpList* opList) {
+            opList->resetFlag(GrOpList::kTempMark_Flag);
         }
-        static bool IsTempMarked(const GrOpList* dt) {
-            return dt->isSetFlag(GrOpList::kTempMark_Flag);
+        static bool IsTempMarked(const GrOpList* opList) {
+            return opList->isSetFlag(GrOpList::kTempMark_Flag);
         }
-        static int NumDependencies(const GrOpList* dt) {
-            return dt->fDependencies.count();
+        static int NumDependencies(const GrOpList* opList) {
+            return opList->fDependencies.count();
         }
-        static GrOpList* Dependency(GrOpList* dt, int index) {
-            return dt->fDependencies[index];
+        static GrOpList* Dependency(GrOpList* opList, int index) {
+            return opList->fDependencies[index];
         }
     };
 
diff --git a/src/core/SkTTopoSort.h b/src/core/SkTTopoSort.h
index 21c8069..722707d 100644
--- a/src/core/SkTTopoSort.h
+++ b/src/core/SkTTopoSort.h
@@ -8,22 +8,23 @@
 #ifndef SkTTopoSort_DEFINED
 #define SkTTopoSort_DEFINED
 
-#include "SkTDArray.h"
+#include "SkRefCnt.h"
+#include "SkTArray.h"
 
 #ifdef SK_DEBUG
 template <typename T, typename Traits = T>
-void SkTTopoSort_CheckAllUnmarked(const SkTDArray<T*>& graph) {
+void SkTTopoSort_CheckAllUnmarked(const SkTArray<sk_sp<T>>& graph) {
     for (int i = 0; i < graph.count(); ++i) {
-        SkASSERT(!Traits::IsTempMarked(graph[i]));
-        SkASSERT(!Traits::WasOutput(graph[i]));
+        SkASSERT(!Traits::IsTempMarked(graph[i].get()));
+        SkASSERT(!Traits::WasOutput(graph[i].get()));
     }
 }
 
 template <typename T, typename Traits = T>
-void SkTTopoSort_CleanExit(const SkTDArray<T*>& graph) {
+void SkTTopoSort_CleanExit(const SkTArray<sk_sp<T>>& graph) {
     for (int i = 0; i < graph.count(); ++i) {
-        SkASSERT(!Traits::IsTempMarked(graph[i]));
-        SkASSERT(Traits::WasOutput(graph[i]));
+        SkASSERT(!Traits::IsTempMarked(graph[i].get()));
+        SkASSERT(Traits::WasOutput(graph[i].get()));
     }
 }
 #endif
@@ -31,7 +32,7 @@
 // Recursively visit a node and all the other nodes it depends on.
 // Return false if there is a loop.
 template <typename T, typename Traits = T>
-bool SkTTopoSort_Visit(T* node, SkTDArray<T*>* result) {
+bool SkTTopoSort_Visit(T* node, SkTArray<sk_sp<T>>* result) {
     if (Traits::IsTempMarked(node)) {
         // There is a loop.
         return false;
@@ -51,7 +52,7 @@
         Traits::Output(node, result->count()); // mark this node as output
         Traits::ResetTempMark(node);
 
-        *result->append() = node;
+        result->push_back(sk_ref_sp(node));
     }
 
     return true;
@@ -78,30 +79,30 @@
 // node and all the nodes on which it depends. This could be used to partially
 // flush a GrOpList DAG.
 template <typename T, typename Traits = T>
-bool SkTTopoSort(SkTDArray<T*>* graph) {
-    SkTDArray<T*> result;
+bool SkTTopoSort(SkTArray<sk_sp<T>>* graph) {
+    SkTArray<sk_sp<T>> result;
 
 #ifdef SK_DEBUG
     SkTTopoSort_CheckAllUnmarked<T, Traits>(*graph);
 #endif
 
-    result.setReserve(graph->count());
+    result.reserve(graph->count());
 
     for (int i = 0; i < graph->count(); ++i) {
-        if (Traits::WasOutput((*graph)[i])) {
+        if (Traits::WasOutput((*graph)[i].get())) {
             // This node was depended on by some earlier node and has already
             // been output
             continue;
         }
 
         // Output this node after all the nodes it depends on have been output.
-        if (!SkTTopoSort_Visit<T, Traits>((*graph)[i], &result)) {
+        if (!SkTTopoSort_Visit<T, Traits>((*graph)[i].get(), &result)) {
             return false;
         }
     }
 
     SkASSERT(graph->count() == result.count());
-    graph->swap(result);
+    graph->swap(&result);
 
 #ifdef SK_DEBUG
     SkTTopoSort_CleanExit<T, Traits>(*graph);
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 536ad07..dce59f3 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -239,7 +239,8 @@
     if (fGpu) {
         fCaps = fGpu->refCaps();
         fResourceCache = new GrResourceCache(fCaps.get(), fUniqueID);
-        fResourceProvider = new GrResourceProvider(fGpu.get(), fResourceCache, &fSingleOwner);
+        fResourceProvider = new GrResourceProvider(fGpu.get(), fResourceCache, &fSingleOwner,
+                                                   options.fExplicitlyAllocateGPUResources);
     }
 
     fProxyProvider = new GrProxyProvider(fResourceProvider, fResourceCache, fCaps, &fSingleOwner);
@@ -282,8 +283,8 @@
     }
 #endif
 
-    fDrawingManager.reset(
-            new GrDrawingManager(this, prcOptions, atlasTextContextOptions, &fSingleOwner));
+    fDrawingManager.reset(new GrDrawingManager(this, prcOptions, atlasTextContextOptions,
+                                               &fSingleOwner, options.fSortRenderTargets));
 
     GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
     if (GrContextOptions::Enable::kNo == options.fAllowMultipleGlyphCacheTextures ||
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index ef4177f..703bc0a 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -115,11 +115,10 @@
     }
 #endif
 
-#ifndef SK_DISABLE_RENDER_TARGET_SORTING
-    SkDEBUGCODE(bool result =)
-                        SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
-    SkASSERT(result);
-#endif
+    if (fSortRenderTargets) {
+        SkDEBUGCODE(bool result =) SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
+        SkASSERT(result);
+    }
 
     GrGpu* gpu = fContext->contextPriv().getGpu();
 
@@ -179,21 +178,14 @@
             alloc.markEndOfOpList(i);
         }
 
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
-        startIndex = 0;
-        stopIndex = fOpLists.count();
-#else
         GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
-        while (alloc.assign(&startIndex, &stopIndex, &error))
-#endif
-        {
-#ifndef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
+        while (alloc.assign(&startIndex, &stopIndex, &error)) {
             if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                 for (int i = startIndex; i < stopIndex; ++i) {
                     fOpLists[i]->purgeOpsWithUninstantiatedProxies();
                 }
             }
-#endif
+
             if (this->executeOpLists(startIndex, stopIndex, &flushState)) {
                 flushed = true;
             }
@@ -221,6 +213,7 @@
 bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushState* flushState) {
     SkASSERT(startIndex <= stopIndex && stopIndex <= fOpLists.count());
 
+    GrResourceProvider* resourceProvider = fContext->contextPriv().resourceProvider();
     bool anyOpListsExecuted = false;
 
     for (int i = startIndex; i < stopIndex; ++i) {
@@ -228,15 +221,19 @@
              continue;
         }
 
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
-        if (!fOpLists[i]->instantiate(fContext->contextPriv().resourceProvider())) {
-            SkDebugf("OpList failed to instantiate.\n");
-            fOpLists[i] = nullptr;
-            continue;
+        if (resourceProvider->explicitlyAllocateGPUResources()) {
+            if (!fOpLists[i]->isInstantiated()) {
+                // If the backing surface wasn't allocated drop the draw of the entire opList.
+                fOpLists[i] = nullptr;
+                continue;
+            }
+        } else {
+            if (!fOpLists[i]->instantiate(resourceProvider)) {
+                SkDebugf("OpList failed to instantiate.\n");
+                fOpLists[i] = nullptr;
+                continue;
+            }
         }
-#else
-        SkASSERT(fOpLists[i]->isInstantiated());
-#endif
 
         // TODO: handle this instantiation via lazy surface proxies?
         // Instantiate all deferred proxies (being built on worker threads) so we can upload them
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index 58b755f..200d0ca 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -86,7 +86,8 @@
     GrDrawingManager(GrContext* context,
                      const GrPathRendererChain::Options& optionsForPathRendererChain,
                      const GrAtlasTextContext::Options& optionsForAtlasTextContext,
-                     GrSingleOwner* singleOwner)
+                     GrSingleOwner* singleOwner,
+                     bool sortRenderTargets)
             : fContext(context)
             , fOptionsForPathRendererChain(optionsForPathRendererChain)
             , fOptionsForAtlasTextContext(optionsForAtlasTextContext)
@@ -95,7 +96,9 @@
             , fAtlasTextContext(nullptr)
             , fPathRendererChain(nullptr)
             , fSoftwarePathRenderer(nullptr)
-            , fFlushing(false) {}
+            , fFlushing(false)
+            , fSortRenderTargets(sortRenderTargets) {
+    }
 
     void abandon();
     void cleanup();
@@ -142,6 +145,7 @@
 
     GrTokenTracker                    fTokenTracker;
     bool                              fFlushing;
+    bool                              fSortRenderTargets;
 
     SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;
 };
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
index b63e96c..bdaaa2a 100644
--- a/src/gpu/GrOpList.cpp
+++ b/src/gpu/GrOpList.cpp
@@ -32,15 +32,16 @@
     fTarget.setProxy(sk_ref_sp(surfaceProxy), kWrite_GrIOType);
     fTarget.get()->setLastOpList(this);
 
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
-    // MDB TODO: remove this! We are currently moving to having all the ops that target
-    // the RT as a dest (e.g., clear, etc.) rely on the opList's 'fTarget' pointer
-    // for the IO Ref. This works well but until they are all swapped over (and none
-    // are pre-emptively instantiating proxies themselves) we need to instantiate
-    // here so that the GrSurfaces are created in an order that preserves the GrSurface
-    // re-use assumptions.
-    fTarget.get()->instantiate(resourceProvider);
-#endif
+    if (resourceProvider && !resourceProvider->explicitlyAllocateGPUResources()) {
+        // MDB TODO: remove this! We are currently moving to having all the ops that target
+        // the RT as a dest (e.g., clear, etc.) rely on the opList's 'fTarget' pointer
+        // for the IO Ref. This works well but until they are all swapped over (and none
+        // are pre-emptively instantiating proxies themselves) we need to instantiate
+        // here so that the GrSurfaces are created in an order that preserves the GrSurface
+        // re-use assumptions.
+        fTarget.get()->instantiate(resourceProvider);
+    }
+
     fTarget.markPendingIO();
 }
 
@@ -67,11 +68,11 @@
 
 void GrOpList::instantiateDeferredProxies(GrResourceProvider* resourceProvider) {
     for (int i = 0; i < fDeferredProxies.count(); ++i) {
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
-        fDeferredProxies[i]->instantiate(resourceProvider);
-#else
-        SkASSERT(fDeferredProxies[i]->priv().isInstantiated());
-#endif
+        if (resourceProvider->explicitlyAllocateGPUResources()) {
+            SkASSERT(fDeferredProxies[i]->priv().isInstantiated());
+        } else {
+            fDeferredProxies[i]->instantiate(resourceProvider);
+        }
     }
 }
 
@@ -118,11 +119,11 @@
     }
 }
 
-#ifdef SK_DEBUG
 bool GrOpList::isInstantiated() const {
     return fTarget.get()->priv().isInstantiated();
 }
 
+#ifdef SK_DEBUG
 void GrOpList::dump() const {
     SkDebugf("--------------------------------------------------------------\n");
     SkDebugf("node: %d -> RT: %d\n", fUniqueID, fTarget.get() ? fTarget.get()->uniqueID().asUInt()
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index d4cfb7f..cb7b677 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -155,12 +155,14 @@
         , fOpList(sk_ref_sp(fRenderTargetProxy->getLastRenderTargetOpList()))
         , fSurfaceProps(SkSurfacePropsCopyOrDefault(surfaceProps))
         , fManagedOpList(managedOpList) {
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
-    // MDB TODO: to ensure all resources still get allocated in the correct order in the hybrid
-    // world we need to get the correct opList here so that it, in turn, can grab and hold
-    // its rendertarget.
-    this->getRTOpList();
-#endif
+    GrResourceProvider* resourceProvider = context->contextPriv().resourceProvider();
+    if (resourceProvider && !resourceProvider->explicitlyAllocateGPUResources()) {
+        // MDB TODO: to ensure all resources still get allocated in the correct order in the hybrid
+        // world we need to get the correct opList here so that it, in turn, can grab and hold
+        // its rendertarget.
+        this->getRTOpList();
+    }
+
     fTextTarget.reset(new TextTarget(this));
     SkDEBUGCODE(this->validate();)
 }
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index 6ed3d4d..f41169c 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -36,11 +36,9 @@
 }
 
 GrResourceAllocator::~GrResourceAllocator() {
-#ifndef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
     SkASSERT(fIntvlList.empty());
     SkASSERT(fActiveIntvls.empty());
     SkASSERT(!fIntvlHash.count());
-#endif
 }
 
 void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end
@@ -79,12 +77,12 @@
     fIntvlList.insertByIncreasingStart(newIntvl);
     fIntvlHash.add(newIntvl);
 
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
-    // FIXME: remove this once we can do the lazy instantiation from assign instead.
-    if (GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState()) {
-        proxy->priv().doLazyInstantiation(fResourceProvider);
+    if (!fResourceProvider->explicitlyAllocateGPUResources()) {
+        // FIXME: remove this once we can do the lazy instantiation from assign instead.
+        if (GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState()) {
+            proxy->priv().doLazyInstantiation(fResourceProvider);
+        }
     }
-#endif
 }
 
 GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
@@ -131,6 +129,13 @@
     }
 }
 
+
+GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::detachAll() {
+    Interval* tmp = fHead;
+    fHead = nullptr;
+    return tmp;
+}
+
 // 'surface' can be reused. Add it back to the free pool.
 void GrResourceAllocator::freeUpSurface(sk_sp<GrSurface> surface) {
     const GrScratchKey &key = surface->resourcePriv().getScratchKey();
@@ -207,6 +212,11 @@
     *startIndex = fCurOpListIndex;
     *stopIndex = fEndOfOpListOpIndices.count();
 
+    if (!fResourceProvider->explicitlyAllocateGPUResources()) {
+        fIntvlList.detachAll(); // arena allocator will clean these up for us
+        return true;
+    }
+
     SkDEBUGCODE(fAssigned = true;)
 
     while (Interval* cur = fIntvlList.popHead()) {
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
index bbc577d..f25bef3 100644
--- a/src/gpu/GrResourceAllocator.h
+++ b/src/gpu/GrResourceAllocator.h
@@ -166,6 +166,7 @@
         Interval* popHead();
         void insertByIncreasingStart(Interval*);
         void insertByIncreasingEnd(Interval*);
+        Interval* detachAll();
 
     private:
         Interval* fHead = nullptr;
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index ec94968..c9575b2 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -33,13 +33,15 @@
 #define ASSERT_SINGLE_OWNER \
     SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)
 
-GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
+GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner,
+                                       bool explicitlyAllocateGPUResources)
         : fCache(cache)
         , fGpu(gpu)
+        , fExplicitlyAllocateGPUResources(explicitlyAllocateGPUResources)
 #ifdef SK_DEBUG
         , fSingleOwner(owner)
 #endif
-        {
+{
     fCaps = sk_ref_sp(fGpu->caps());
 
     GR_DEFINE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index 385282d..7b5fb60 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -39,7 +39,7 @@
  */
 class GrResourceProvider {
 public:
-    GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner);
+    GrResourceProvider(GrGpu*, GrResourceCache*, GrSingleOwner*, bool explicitlyAllocate);
 
     /**
      * Finds a resource in the cache, based on the specified key. Prior to calling this, the caller
@@ -258,6 +258,10 @@
     inline GrResourceProviderPriv priv();
     inline const GrResourceProviderPriv priv() const;
 
+    bool explicitlyAllocateGPUResources() const { return fExplicitlyAllocateGPUResources; }
+
+    bool testingOnly_setExplicitlyAllocateGPUResources(bool newValue);
+
 private:
     sk_sp<GrGpuResource> findResourceByUniqueKey(const GrUniqueKey&);
 
@@ -297,6 +301,7 @@
     GrGpu*              fGpu;
     sk_sp<const GrCaps> fCaps;
     GrUniqueKey         fQuadIndexBufferKey;
+    bool                fExplicitlyAllocateGPUResources;
 
     // In debug builds we guard against improper thread handling
     SkDEBUGCODE(mutable GrSingleOwner* fSingleOwner;)
diff --git a/src/gpu/GrTextureRenderTargetProxy.cpp b/src/gpu/GrTextureRenderTargetProxy.cpp
index f1e47ec..3312077 100644
--- a/src/gpu/GrTextureRenderTargetProxy.cpp
+++ b/src/gpu/GrTextureRenderTargetProxy.cpp
@@ -108,7 +108,8 @@
 void GrTextureRenderTargetProxy::validateLazySurface(const GrSurface* surface) {
     // Anything checked here should also be checking the GrTextureProxy version
     SkASSERT(surface->asTexture());
-    SkASSERT(surface->asTexture()->texturePriv().mipMapped() == this->mipMapped());
+    SkASSERT(GrMipMapped::kNo == this->mipMapped() ||
+             GrMipMapped::kYes == surface->asTexture()->texturePriv().mipMapped());
 
     // Anything checked here should also be checking the GrRenderTargetProxy version
     SkASSERT(surface->asRenderTarget());
diff --git a/tests/LazyProxyTest.cpp b/tests/LazyProxyTest.cpp
index 5c05327..1994eeb 100644
--- a/tests/LazyProxyTest.cpp
+++ b/tests/LazyProxyTest.cpp
@@ -298,6 +298,7 @@
 DEF_GPUTEST(LazyProxyFailedInstantiationTest, reporter, /* options */) {
     GrMockOptions mockOptions;
     sk_sp<GrContext> ctx = GrContext::MakeMock(&mockOptions, GrContextOptions());
+    GrResourceProvider* resourceProvider = ctx->contextPriv().resourceProvider();
     GrProxyProvider* proxyProvider = ctx->contextPriv().proxyProvider();
     for (bool failInstantiation : {false, true}) {
         sk_sp<GrRenderTargetContext> rtc =
@@ -314,13 +315,13 @@
         ctx->flush();
 
         if (failInstantiation) {
-#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
-            // When we disable explicit gpu resource allocation we don't throw away ops that have
-            // uninstantiated proxies.
-            REPORTER_ASSERT(reporter, 2 == executeTestValue);
-#else
-            REPORTER_ASSERT(reporter, 1 == executeTestValue);
-#endif
+            if (resourceProvider->explicitlyAllocateGPUResources()) {
+                REPORTER_ASSERT(reporter, 1 == executeTestValue);
+            } else {
+                // When we disable explicit gpu resource allocation we don't throw away ops that
+                // have uninstantiated proxies.
+                REPORTER_ASSERT(reporter, 2 == executeTestValue);
+            }
         } else {
             REPORTER_ASSERT(reporter, 2 == executeTestValue);
         }
diff --git a/tests/ResourceAllocatorTest.cpp b/tests/ResourceAllocatorTest.cpp
index 2a4a9f7..29ad5b3 100644
--- a/tests/ResourceAllocatorTest.cpp
+++ b/tests/ResourceAllocatorTest.cpp
@@ -103,10 +103,18 @@
     REPORTER_ASSERT(reporter, expectedResult == doTheBackingStoresMatch);
 }
 
+bool GrResourceProvider::testingOnly_setExplicitlyAllocateGPUResources(bool newValue) {
+    bool oldValue = fExplicitlyAllocateGPUResources;
+    fExplicitlyAllocateGPUResources = newValue;
+    return oldValue;
+}
+
 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
     GrProxyProvider* proxyProvider = ctxInfo.grContext()->contextPriv().proxyProvider();
     GrResourceProvider* resourceProvider = ctxInfo.grContext()->contextPriv().resourceProvider();
 
+    bool orig = resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(true);
+
     struct TestCase {
         ProxyParams   fP1;
         ProxyParams   fP2;
@@ -202,6 +210,8 @@
                          std::move(p1), std::move(p2), t[0].fExpectation);
         cleanup_backend(ctxInfo.grContext(), &backEndTex);
     }
+
+    resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(orig);
 }
 
 #endif
diff --git a/tests/TopoSortTest.cpp b/tests/TopoSortTest.cpp
index 9cee485..18ad75d 100644
--- a/tests/TopoSortTest.cpp
+++ b/tests/TopoSortTest.cpp
@@ -11,7 +11,7 @@
 
 #include "sk_tool_utils.h"
 
-typedef void (*CreateGraphPF)(SkTDArray<sk_tool_utils::TopoTestNode*>* graph);
+typedef void (*CreateGraphPF)(SkTArray<sk_sp<sk_tool_utils::TopoTestNode>>* graph);
 
 /* Simple diamond
  *       3
@@ -20,13 +20,13 @@
  *     \   /
  *       0
  */
-static void create_graph0(SkTDArray<sk_tool_utils::TopoTestNode*>* graph) {
+static void create_graph0(SkTArray<sk_sp<sk_tool_utils::TopoTestNode>>* graph) {
     sk_tool_utils::TopoTestNode::AllocNodes(graph, 4);
 
-    (*graph)[0]->dependsOn((*graph)[1]);
-    (*graph)[0]->dependsOn((*graph)[2]);
-    (*graph)[1]->dependsOn((*graph)[3]);
-    (*graph)[2]->dependsOn((*graph)[3]);
+    (*graph)[0]->dependsOn((*graph)[1].get());
+    (*graph)[0]->dependsOn((*graph)[2].get());
+    (*graph)[1]->dependsOn((*graph)[3].get());
+    (*graph)[2]->dependsOn((*graph)[3].get());
 }
 
 /* Simple chain
@@ -38,12 +38,12 @@
  *     |
  *     0
  */
-static void create_graph1(SkTDArray<sk_tool_utils::TopoTestNode*>* graph) {
+static void create_graph1(SkTArray<sk_sp<sk_tool_utils::TopoTestNode>>* graph) {
     sk_tool_utils::TopoTestNode::AllocNodes(graph, 4);
 
-    (*graph)[0]->dependsOn((*graph)[1]);
-    (*graph)[1]->dependsOn((*graph)[2]);
-    (*graph)[2]->dependsOn((*graph)[3]);
+    (*graph)[0]->dependsOn((*graph)[1].get());
+    (*graph)[1]->dependsOn((*graph)[2].get());
+    (*graph)[2]->dependsOn((*graph)[3].get());
 }
 
 /* Loop
@@ -51,12 +51,12 @@
  *     /   \
  *    0 --- 1
  */
-static void create_graph2(SkTDArray<sk_tool_utils::TopoTestNode*>* graph) {
+static void create_graph2(SkTArray<sk_sp<sk_tool_utils::TopoTestNode>>* graph) {
     sk_tool_utils::TopoTestNode::AllocNodes(graph, 3);
 
-    (*graph)[0]->dependsOn((*graph)[1]);
-    (*graph)[1]->dependsOn((*graph)[2]);
-    (*graph)[2]->dependsOn((*graph)[0]);
+    (*graph)[0]->dependsOn((*graph)[1].get());
+    (*graph)[1]->dependsOn((*graph)[2].get());
+    (*graph)[2]->dependsOn((*graph)[0].get());
 }
 
 /* Double diamond
@@ -70,18 +70,18 @@
  *     \   /
  *       0
  */
-static void create_graph3(SkTDArray<sk_tool_utils::TopoTestNode*>* graph) {
+static void create_graph3(SkTArray<sk_sp<sk_tool_utils::TopoTestNode>>* graph) {
     sk_tool_utils::TopoTestNode::AllocNodes(graph, 7);
 
-    (*graph)[0]->dependsOn((*graph)[1]);
-    (*graph)[0]->dependsOn((*graph)[2]);
-    (*graph)[1]->dependsOn((*graph)[3]);
-    (*graph)[2]->dependsOn((*graph)[3]);
+    (*graph)[0]->dependsOn((*graph)[1].get());
+    (*graph)[0]->dependsOn((*graph)[2].get());
+    (*graph)[1]->dependsOn((*graph)[3].get());
+    (*graph)[2]->dependsOn((*graph)[3].get());
 
-    (*graph)[3]->dependsOn((*graph)[4]);
-    (*graph)[3]->dependsOn((*graph)[5]);
-    (*graph)[4]->dependsOn((*graph)[6]);
-    (*graph)[5]->dependsOn((*graph)[6]);
+    (*graph)[3]->dependsOn((*graph)[4].get());
+    (*graph)[3]->dependsOn((*graph)[5].get());
+    (*graph)[4]->dependsOn((*graph)[6].get());
+    (*graph)[5]->dependsOn((*graph)[6].get());
 }
 
 /* Two independent diamonds
@@ -91,18 +91,18 @@
  *     \   /       \   /
  *       0           4
  */
-static void create_graph4(SkTDArray<sk_tool_utils::TopoTestNode*>* graph) {
+static void create_graph4(SkTArray<sk_sp<sk_tool_utils::TopoTestNode>>* graph) {
     sk_tool_utils::TopoTestNode::AllocNodes(graph, 8);
 
-    (*graph)[0]->dependsOn((*graph)[1]);
-    (*graph)[0]->dependsOn((*graph)[2]);
-    (*graph)[1]->dependsOn((*graph)[3]);
-    (*graph)[2]->dependsOn((*graph)[3]);
+    (*graph)[0]->dependsOn((*graph)[1].get());
+    (*graph)[0]->dependsOn((*graph)[2].get());
+    (*graph)[1]->dependsOn((*graph)[3].get());
+    (*graph)[2]->dependsOn((*graph)[3].get());
 
-    (*graph)[4]->dependsOn((*graph)[5]);
-    (*graph)[4]->dependsOn((*graph)[6]);
-    (*graph)[5]->dependsOn((*graph)[7]);
-    (*graph)[6]->dependsOn((*graph)[7]);
+    (*graph)[4]->dependsOn((*graph)[5].get());
+    (*graph)[4]->dependsOn((*graph)[6].get());
+    (*graph)[5]->dependsOn((*graph)[7].get());
+    (*graph)[6]->dependsOn((*graph)[7].get());
 }
 
 DEF_TEST(TopoSort, reporter) {
@@ -120,7 +120,7 @@
     };
 
     for (size_t i = 0; i < SK_ARRAY_COUNT(tests); ++i) {
-        SkTDArray<sk_tool_utils::TopoTestNode*> graph;
+        SkTArray<sk_sp<sk_tool_utils::TopoTestNode>> graph;
 
         (tests[i].fCreate)(&graph);
 
@@ -136,6 +136,5 @@
         }
 
         //SkDEBUGCODE(print(graph);)
-        sk_tool_utils::TopoTestNode::DeallocNodes(&graph);
     }
 }
diff --git a/tools/sk_tool_utils.h b/tools/sk_tool_utils.h
index 9081e3c..d27a14a 100644
--- a/tools/sk_tool_utils.h
+++ b/tools/sk_tool_utils.h
@@ -12,6 +12,7 @@
 #include "SkImageEncoder.h"
 #include "SkImageInfo.h"
 #include "SkRandom.h"
+#include "SkRefCnt.h"
 #include "SkStream.h"
 #include "SkTDArray.h"
 #include "SkTypeface.h"
@@ -147,7 +148,7 @@
     SkRect compute_tallest_occluder(const SkRRect& rr);
 
     // A helper object to test the topological sorting code (TopoSortBench.cpp & TopoSortTest.cpp)
-    class TopoTestNode {
+    class TopoTestNode : public SkRefCnt {
     public:
         TopoTestNode(int id) : fID(id), fOutputPos(-1), fTempMark(false) { }
 
@@ -194,37 +195,29 @@
         }
 
         // Helper functions for TopoSortBench & TopoSortTest
-        static void AllocNodes(SkTDArray<TopoTestNode*>* graph, int num) {
-            graph->setReserve(num);
+        static void AllocNodes(SkTArray<sk_sp<sk_tool_utils::TopoTestNode>>* graph, int num) {
+            graph->reserve(num);
 
             for (int i = 0; i < num; ++i) {
-                *graph->append() = new TopoTestNode(i);
+                graph->push_back(sk_sp<TopoTestNode>(new TopoTestNode(i)));
             }
         }
 
-        static void DeallocNodes(SkTDArray<TopoTestNode*>* graph) {
-            for (int i = 0; i < graph->count(); ++i) {
-                delete (*graph)[i];
-            }
-        }
-
-        #ifdef SK_DEBUG
-        static void Print(const SkTDArray<TopoTestNode*>& graph) {
+#ifdef SK_DEBUG
+        static void Print(const SkTArray<TopoTestNode*>& graph) {
             for (int i = 0; i < graph.count(); ++i) {
                 SkDebugf("%d, ", graph[i]->id());
             }
             SkDebugf("\n");
         }
-        #endif
+#endif
 
         // randomize the array
-        static void Shuffle(SkTDArray<TopoTestNode*>* graph, SkRandom* rand) {
+        static void Shuffle(SkTArray<sk_sp<TopoTestNode>>* graph, SkRandom* rand) {
             for (int i = graph->count()-1; i > 0; --i) {
                 int swap = rand->nextU() % (i+1);
 
-                TopoTestNode* tmp = (*graph)[i];
-                (*graph)[i] = (*graph)[swap];
-                (*graph)[swap] = tmp;
+                (*graph)[i].swap((*graph)[swap]);
             }
         }