Remove the option of disabling explicit resource allocation

We're burning our boats behind us. Succeed or die trying!
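
In practice this means the flush-time call sites no longer carry a
fallback that instantiates proxies on demand: GrResourceAllocator is
now always responsible for instantiation, and callers simply assert
isInstantiated() (or drop the work if the backing surface was never
allocated). A minimal standalone sketch of the before/after call-site
pattern, using hypothetical Surface/SurfaceProxy/ResourceProvider
stand-ins rather than Skia's real classes:

    #include <cassert>
    #include <memory>

    struct Surface {};

    struct ResourceProvider {
        std::shared_ptr<Surface> createSurface() { return std::make_shared<Surface>(); }
    };

    struct SurfaceProxy {
        std::shared_ptr<Surface> fSurface;

        bool isInstantiated() const { return fSurface != nullptr; }

        // Old fallback path: materialize the backing surface on demand.
        bool instantiate(ResourceProvider* rp) {
            if (!fSurface) { fSurface = rp->createSurface(); }
            return fSurface != nullptr;
        }
    };

    // Before: branch on the (now removed) option and instantiate on the
    // fly when explicit allocation was disabled.
    bool executeBefore(SurfaceProxy* proxy, ResourceProvider* rp, bool explicitAlloc) {
        if (explicitAlloc) {
            assert(proxy->isInstantiated());
        } else if (!proxy->instantiate(rp)) {
            return false;  // drop the work
        }
        return true;
    }

    // After: the allocator is assumed to have instantiated every
    // surviving proxy before execution, so callers only assert.
    bool executeAfter(SurfaceProxy* proxy) {
        assert(proxy->isInstantiated());
        return true;
    }

    int main() {
        ResourceProvider rp;
        SurfaceProxy proxy;
        proxy.instantiate(&rp);  // stands in for the allocator's assign step
        return (executeBefore(&proxy, &rp, true) && executeAfter(&proxy)) ? 0 : 1;
    }

The sketch only illustrates the shape of the change; the actual
assertion sites are in the diff below (GrDrawingManager, GrOpList,
GrPipeline, GrGpuCommandBuffer, and the ops).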

Change-Id: I6a9f71b758a6ae7b090c5221ab12a5ab4d166b47
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/209647
Commit-Queue: Robert Phillips <robertphillips@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
diff --git a/src/gpu/GrBaseContextPriv.h b/src/gpu/GrBaseContextPriv.h
index 31958b3..61d4a30 100644
--- a/src/gpu/GrBaseContextPriv.h
+++ b/src/gpu/GrBaseContextPriv.h
@@ -22,10 +22,6 @@
 
     const GrContextOptions& options() const { return fContext->options(); }
 
-    bool explicitlyAllocateGPUResources() const {
-        return fContext->explicitlyAllocateGPUResources();
-    }
-
     const GrCaps* caps() const { return fContext->caps(); }
     sk_sp<const GrCaps> refCaps() const;
 
diff --git a/src/gpu/GrContext.cpp b/src/gpu/GrContext.cpp
index 0517e4e..6ee25d6 100644
--- a/src/gpu/GrContext.cpp
+++ b/src/gpu/GrContext.cpp
@@ -76,8 +76,7 @@
 
     if (fGpu) {
         fResourceCache = new GrResourceCache(this->caps(), this->singleOwner(), this->contextID());
-        fResourceProvider = new GrResourceProvider(fGpu.get(), fResourceCache, this->singleOwner(),
-                                                   this->explicitlyAllocateGPUResources());
+        fResourceProvider = new GrResourceProvider(fGpu.get(), fResourceCache, this->singleOwner());
     }
 
     if (fResourceCache) {
diff --git a/src/gpu/GrContextPriv.h b/src/gpu/GrContextPriv.h
index 2eb3b8d..3839aa9 100644
--- a/src/gpu/GrContextPriv.h
+++ b/src/gpu/GrContextPriv.h
@@ -38,10 +38,6 @@
 
     const GrContextOptions& options() const { return fContext->options(); }
 
-    bool explicitlyAllocateGPUResources() const {
-        return fContext->explicitlyAllocateGPUResources();
-    }
-
     const GrCaps* caps() const { return fContext->caps(); }
     sk_sp<const GrCaps> refCaps() const;
 
diff --git a/src/gpu/GrDDLContext.cpp b/src/gpu/GrDDLContext.cpp
index 6836e46..3b2d6d0 100644
--- a/src/gpu/GrDDLContext.cpp
+++ b/src/gpu/GrDDLContext.cpp
@@ -52,9 +52,7 @@
             return false;
         }
 
-        // DDL contexts/drawing managers always sort the oplists. This, in turn, implies that
-        // explicit resource allocation is always on (regardless of how Skia is compiled).
-        this->setupDrawingManager(true, true);
+        this->setupDrawingManager(true);  // DDL contexts/drawing managers always sort the oplists.
 
         SkASSERT(this->caps());
 
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 2b831e4..d887e1a 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -34,10 +34,7 @@
 #include "ccpr/GrCoverageCountingPathRenderer.h"
 #include "text/GrTextContext.h"
 
-GrDrawingManager::OpListDAG::OpListDAG(bool explicitlyAllocating, bool sortOpLists)
-        : fSortOpLists(sortOpLists) {
-    SkASSERT(!sortOpLists || explicitlyAllocating);
-}
+GrDrawingManager::OpListDAG::OpListDAG(bool sortOpLists) : fSortOpLists(sortOpLists) {}
 
 GrDrawingManager::OpListDAG::~OpListDAG() {}
 
@@ -153,13 +150,12 @@
 GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
                                    const GrPathRendererChain::Options& optionsForPathRendererChain,
                                    const GrTextContext::Options& optionsForTextContext,
-                                   bool explicitlyAllocating,
                                    bool sortOpLists,
                                    GrContextOptions::Enable reduceOpListSplitting)
         : fContext(context)
         , fOptionsForPathRendererChain(optionsForPathRendererChain)
         , fOptionsForTextContext(optionsForTextContext)
-        , fDAG(explicitlyAllocating, sortOpLists)
+        , fDAG(sortOpLists)
         , fTextContext(nullptr)
         , fPathRendererChain(nullptr)
         , fSoftwarePathRenderer(nullptr)
@@ -411,17 +407,10 @@
 
         GrOpList* opList = fDAG.opList(i);
 
-        if (resourceProvider->explicitlyAllocateGPUResources()) {
-            if (!opList->isFullyInstantiated()) {
-                // If the backing surface wasn't allocated drop the draw of the entire opList.
-                fDAG.removeOpList(i);
-                continue;
-            }
-        } else {
-            if (!opList->instantiate(resourceProvider)) {
-                fDAG.removeOpList(i);
-                continue;
-            }
+        if (!opList->isFullyInstantiated()) {
+            // If the backing surface wasn't allocated, drop the draw of the entire opList.
+            fDAG.removeOpList(i);
+            continue;
         }
 
         // TODO: handle this instantiation via lazy surface proxies?
@@ -633,16 +622,7 @@
         fActiveOpList = nullptr;
     }
 
-    // MDB TODO: this is unfortunate. GrOpList only needs the resourceProvider here so that, when
-    // not explicitly allocating resources, it can immediately instantiate 'rtp' so that the use
-    // order matches the allocation order (see the comment in GrOpList's ctor).
-    GrResourceProvider* resourceProvider = nullptr;
-    if (fContext->priv().asDirectContext()) {
-        resourceProvider = fContext->priv().asDirectContext()->priv().resourceProvider();
-    }
-
     sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(
-                                                        resourceProvider,
                                                         fContext->priv().refOpMemoryPool(),
                                                         rtp,
                                                         fContext->priv().auditTrail()));
@@ -682,16 +662,7 @@
         fActiveOpList = nullptr;
     }
 
-    // MDB TODO: this is unfortunate. GrOpList only needs the resourceProvider here so that, when
-    // not explicitly allocating resources, it can immediately instantiate 'texureProxy' so that
-    // the use order matches the allocation order (see the comment in GrOpList's ctor).
-    GrResourceProvider* resourceProvider = nullptr;
-    if (fContext->priv().asDirectContext()) {
-        resourceProvider = fContext->priv().asDirectContext()->priv().resourceProvider();
-    }
-
-    sk_sp<GrTextureOpList> opList(new GrTextureOpList(resourceProvider,
-                                                      fContext->priv().refOpMemoryPool(),
+    sk_sp<GrTextureOpList> opList(new GrTextureOpList(fContext->priv().refOpMemoryPool(),
                                                       textureProxy,
                                                       fContext->priv().auditTrail()));
 
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index 0ec6ed9..592e471 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -87,7 +87,7 @@
     // This class encapsulates maintenance and manipulation of the drawing manager's DAG of opLists.
     class OpListDAG {
     public:
-        OpListDAG(bool explicitlyAllocating, bool sortOpLists);
+        OpListDAG(bool sortOpLists);
         ~OpListDAG();
 
         // Currently, when explicitly allocating resources, this call will topologically sort the
@@ -136,7 +136,6 @@
 
     GrDrawingManager(GrRecordingContext*, const GrPathRendererChain::Options&,
                      const GrTextContext::Options&,
-                     bool explicitlyAllocating,
                      bool sortOpLists,
                      GrContextOptions::Enable reduceOpListSplitting);
 
diff --git a/src/gpu/GrFragmentProcessor.h b/src/gpu/GrFragmentProcessor.h
index 96c578c..007458b 100644
--- a/src/gpu/GrFragmentProcessor.h
+++ b/src/gpu/GrFragmentProcessor.h
@@ -439,11 +439,7 @@
     // 'instantiate' should only ever be called at flush time.
     // TODO: this can go away once explicit allocation has stuck
     bool instantiate(GrResourceProvider* resourceProvider) const {
-        if (resourceProvider->explicitlyAllocateGPUResources()) {
-            return fProxyRef.get()->isInstantiated();
-        } else {
-            return SkToBool(fProxyRef.get()->instantiate(resourceProvider));
-        }
+        return fProxyRef.get()->isInstantiated();
     }
 
     // 'peekTexture' should only ever be called after a successful 'instantiate' call
diff --git a/src/gpu/GrGpuCommandBuffer.cpp b/src/gpu/GrGpuCommandBuffer.cpp
index a80b088..b1215f6 100644
--- a/src/gpu/GrGpuCommandBuffer.cpp
+++ b/src/gpu/GrGpuCommandBuffer.cpp
@@ -49,32 +49,22 @@
     SkASSERT(!pipeline.isScissorEnabled() || fixedDynamicState ||
              (dynamicStateArrays && dynamicStateArrays->fScissorRects));
 
-    auto resourceProvider = this->gpu()->getContext()->priv().resourceProvider();
-
     if (pipeline.isBad()) {
         return false;
     }
+#ifdef SK_DEBUG
     if (fixedDynamicState && fixedDynamicState->fPrimitiveProcessorTextures) {
         GrTextureProxy** processorProxies = fixedDynamicState->fPrimitiveProcessorTextures;
         for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
-            if (resourceProvider->explicitlyAllocateGPUResources()) {
-                SkASSERT(processorProxies[i]->isInstantiated());
-            } else if (!processorProxies[i]->instantiate(resourceProvider)) {
-                return false;
-            }
+            SkASSERT(processorProxies[i]->isInstantiated());
         }
     }
     if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
         int n = primProc.numTextureSamplers() * meshCount;
         const auto* textures = dynamicStateArrays->fPrimitiveProcessorTextures;
         for (int i = 0; i < n; ++i) {
-            if (resourceProvider->explicitlyAllocateGPUResources()) {
-                SkASSERT(textures[i]->isInstantiated());
-            } else if (!textures[i]->instantiate(resourceProvider)) {
-                return false;
-            }
+            SkASSERT(textures[i]->isInstantiated());
         }
-#ifdef SK_DEBUG
         SkASSERT(meshCount >= 1);
         const GrTextureProxy* const* primProcProxies =
                 dynamicStateArrays->fPrimitiveProcessorTextures;
@@ -90,9 +80,8 @@
                 SkASSERT(testProxy->config() == config);
             }
         }
-#endif
-
     }
+#endif
 
     if (primProc.numVertexAttributes() > this->gpu()->caps()->maxVertexAttributes()) {
         this->gpu()->stats()->incNumFailedDraws();
diff --git a/src/gpu/GrImageContextPriv.h b/src/gpu/GrImageContextPriv.h
index 1139085..c1fca60 100644
--- a/src/gpu/GrImageContextPriv.h
+++ b/src/gpu/GrImageContextPriv.h
@@ -22,10 +22,6 @@
 
     const GrContextOptions& options() const { return fContext->options(); }
 
-    bool explicitlyAllocateGPUResources() const {
-        return fContext->explicitlyAllocateGPUResources();
-    }
-
     const GrCaps* caps() const { return fContext->caps(); }
     sk_sp<const GrCaps> refCaps() const;
 
diff --git a/src/gpu/GrLegacyDirectContext.cpp b/src/gpu/GrLegacyDirectContext.cpp
index d5bce1b..1cf4b64 100644
--- a/src/gpu/GrLegacyDirectContext.cpp
+++ b/src/gpu/GrLegacyDirectContext.cpp
@@ -86,7 +86,7 @@
             sortOpLists = true;
         }
 
-        this->setupDrawingManager(this->explicitlyAllocateGPUResources(), sortOpLists);
+        this->setupDrawingManager(sortOpLists);
 
         SkASSERT(this->caps());
 
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
index e2a4c5c..da23613 100644
--- a/src/gpu/GrOpList.cpp
+++ b/src/gpu/GrOpList.cpp
@@ -24,8 +24,9 @@
     return id;
 }
 
-GrOpList::GrOpList(GrResourceProvider* resourceProvider, sk_sp<GrOpMemoryPool> opMemoryPool,
-                   sk_sp<GrSurfaceProxy> surfaceProxy, GrAuditTrail* auditTrail)
+GrOpList::GrOpList(sk_sp<GrOpMemoryPool> opMemoryPool,
+                   sk_sp<GrSurfaceProxy> surfaceProxy,
+                   GrAuditTrail* auditTrail)
         : fOpMemoryPool(std::move(opMemoryPool))
         , fAuditTrail(auditTrail)
         , fUniqueID(CreateUniqueID())
@@ -34,16 +35,6 @@
     fTarget.setProxy(std::move(surfaceProxy), kWrite_GrIOType);
     fTarget.get()->setLastOpList(this);
 
-    if (resourceProvider && !resourceProvider->explicitlyAllocateGPUResources()) {
-        // MDB TODO: remove this! We are currently moving to having all the ops that target
-        // the RT as a dest (e.g., clear, etc.) rely on the opList's 'fTarget' pointer
-        // for the IO Ref. This works well but until they are all swapped over (and none
-        // are pre-emptively instantiating proxies themselves) we need to instantiate
-        // here so that the GrSurfaces are created in an order that preserves the GrSurface
-        // re-use assumptions.
-        fTarget.get()->instantiate(resourceProvider);
-    }
-
     fTarget.markPendingIO();
 }
 
@@ -56,12 +47,8 @@
 
 // TODO: this can go away when explicit allocation has stuck
 bool GrOpList::instantiate(GrResourceProvider* resourceProvider) {
-    if (resourceProvider->explicitlyAllocateGPUResources()) {
-        SkASSERT(fTarget.get()->isInstantiated());
-        return true;
-    } else {
-        return SkToBool(fTarget.get()->instantiate(resourceProvider));
-    }
+    SkASSERT(fTarget.get()->isInstantiated());
+    return true;
 }
 
 void GrOpList::endFlush() {
@@ -76,11 +63,7 @@
 
 void GrOpList::instantiateDeferredProxies(GrResourceProvider* resourceProvider) {
     for (int i = 0; i < fDeferredProxies.count(); ++i) {
-        if (resourceProvider->explicitlyAllocateGPUResources()) {
-            SkASSERT(fDeferredProxies[i]->isInstantiated());
-        } else {
-            fDeferredProxies[i]->instantiate(resourceProvider);
-        }
+        SkASSERT(fDeferredProxies[i]->isInstantiated());
     }
 }
 
diff --git a/src/gpu/GrPipeline.cpp b/src/gpu/GrPipeline.cpp
index 2c15f32..99ad78d 100644
--- a/src/gpu/GrPipeline.cpp
+++ b/src/gpu/GrPipeline.cpp
@@ -39,11 +39,7 @@
     fXferProcessor = processors.refXferProcessor();
 
     if (args.fDstProxy.proxy()) {
-        if (args.fResourceProvider->explicitlyAllocateGPUResources()) {
-            SkASSERT(args.fDstProxy.proxy()->isInstantiated());
-        } else if (!args.fDstProxy.proxy()->instantiate(args.fResourceProvider)) {
-            this->markAsBad();
-        }
+        SkASSERT(args.fDstProxy.proxy()->isInstantiated());
 
         fDstTextureProxy.reset(args.fDstProxy.proxy());
         fDstTextureOffset = args.fDstProxy.offset();
diff --git a/src/gpu/GrProxyProvider.cpp b/src/gpu/GrProxyProvider.cpp
index dad4d2f..ee8d0a1 100644
--- a/src/gpu/GrProxyProvider.cpp
+++ b/src/gpu/GrProxyProvider.cpp
@@ -247,9 +247,7 @@
             surfaceFlags |= GrInternalSurfaceFlags::kMixedSampled;
         }
     }
-    if (fImageContext->priv().explicitlyAllocateGPUResources()) {
-        surfaceFlags |= GrInternalSurfaceFlags::kNoPendingIO;
-    }
+    surfaceFlags |= GrInternalSurfaceFlags::kNoPendingIO;
 
     GrSurfaceDesc desc;
     desc.fWidth = srcImage->width();
diff --git a/src/gpu/GrRecordingContext.cpp b/src/gpu/GrRecordingContext.cpp
index 7adb948..7b1a1c0 100644
--- a/src/gpu/GrRecordingContext.cpp
+++ b/src/gpu/GrRecordingContext.cpp
@@ -64,7 +64,7 @@
     return true;
 }
 
-void GrRecordingContext::setupDrawingManager(bool explicitlyAllocate, bool sortOpLists) {
+void GrRecordingContext::setupDrawingManager(bool sortOpLists) {
     GrPathRendererChain::Options prcOptions;
     prcOptions.fAllowPathMaskCaching = this->options().fAllowPathMaskCaching;
 #if GR_TEST_UTILS
@@ -100,11 +100,10 @@
     // DDL contexts/drawing managers. We should still obey the options for non-DDL drawing managers
     // until predictive intermediate flushes are added (i.e., we can't reorder forever).
     fDrawingManager.reset(new GrDrawingManager(this,
-                                                prcOptions,
-                                                textContextOptions,
-                                                explicitlyAllocate,
-                                                sortOpLists,
-                                                this->options().fReduceOpListSplitting));
+                                               prcOptions,
+                                               textContextOptions,
+                                               sortOpLists,
+                                               this->options().fReduceOpListSplitting));
 }
 
 void GrRecordingContext::abandonContext() {
diff --git a/src/gpu/GrRecordingContextPriv.h b/src/gpu/GrRecordingContextPriv.h
index 46f13f2..a7977ce 100644
--- a/src/gpu/GrRecordingContextPriv.h
+++ b/src/gpu/GrRecordingContextPriv.h
@@ -22,10 +22,6 @@
 
     const GrContextOptions& options() const { return fContext->options(); }
 
-    bool explicitlyAllocateGPUResources() const {
-        return fContext->explicitlyAllocateGPUResources();
-    }
-
     const GrCaps* caps() const { return fContext->caps(); }
     sk_sp<const GrCaps> refCaps() const;
 
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index 69b9fe1..dbf4e28 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -144,13 +144,6 @@
         , fOpList(sk_ref_sp(fRenderTargetProxy->getLastRenderTargetOpList()))
         , fSurfaceProps(SkSurfacePropsCopyOrDefault(surfaceProps))
         , fManagedOpList(managedOpList) {
-    if (!context->priv().explicitlyAllocateGPUResources()) {
-        // MDB TODO: to ensure all resources still get allocated in the correct order in the hybrid
-        // world we need to get the correct opList here so that it, in turn, can grab and hold
-        // its rendertarget.
-        this->getRTOpList();
-    }
-
     fTextTarget.reset(new TextTarget(this));
     SkDEBUGCODE(this->validate();)
 }
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index 96c8f25..e652902 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -348,11 +348,10 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-GrRenderTargetOpList::GrRenderTargetOpList(GrResourceProvider* resourceProvider,
-                                           sk_sp<GrOpMemoryPool> opMemoryPool,
+GrRenderTargetOpList::GrRenderTargetOpList(sk_sp<GrOpMemoryPool> opMemoryPool,
                                            sk_sp<GrRenderTargetProxy> proxy,
                                            GrAuditTrail* auditTrail)
-        : INHERITED(resourceProvider, std::move(opMemoryPool), std::move(proxy), auditTrail)
+        : INHERITED(std::move(opMemoryPool), std::move(proxy), auditTrail)
         , fLastClipStackGenID(SK_InvalidUniqueID)
         SkDEBUGCODE(, fNumClips(0)) {
 }
diff --git a/src/gpu/GrRenderTargetOpList.h b/src/gpu/GrRenderTargetOpList.h
index e5442c1..692dee7 100644
--- a/src/gpu/GrRenderTargetOpList.h
+++ b/src/gpu/GrRenderTargetOpList.h
@@ -33,8 +33,7 @@
     using DstProxy = GrXferProcessor::DstProxy;
 
 public:
-    GrRenderTargetOpList(GrResourceProvider*, sk_sp<GrOpMemoryPool>,
-                         sk_sp<GrRenderTargetProxy>, GrAuditTrail*);
+    GrRenderTargetOpList(sk_sp<GrOpMemoryPool>, sk_sp<GrRenderTargetProxy>, GrAuditTrail*);
 
     ~GrRenderTargetOpList() override;
 
diff --git a/src/gpu/GrRenderTargetProxy.cpp b/src/gpu/GrRenderTargetProxy.cpp
index 3a84bfe..8f0d88c 100644
--- a/src/gpu/GrRenderTargetProxy.cpp
+++ b/src/gpu/GrRenderTargetProxy.cpp
@@ -79,8 +79,6 @@
 }
 
 sk_sp<GrSurface> GrRenderTargetProxy::createSurface(GrResourceProvider* resourceProvider) const {
-    SkASSERT(resourceProvider->explicitlyAllocateGPUResources());
-
     static constexpr GrSurfaceDescFlags kDescFlags = kRenderTarget_GrSurfaceFlag;
 
     sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, fSampleCnt, fNeedsStencil,
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index 8078463..ff09c31 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -119,7 +119,7 @@
 
     // Because readOnly proxies do not get a usage interval we must instantiate them here (since it
     // won't occur in GrResourceAllocator::assign)
-    if (proxy->readOnly() || !fResourceProvider->explicitlyAllocateGPUResources()) {
+    if (proxy->readOnly()) {
         // FIXME: remove this once we can do the lazy instantiation from assign instead.
         if (GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState()) {
             if (proxy->priv().doLazyInstantiation(fResourceProvider)) {
@@ -382,11 +382,6 @@
     SkDebugf("\n");
 #endif
 
-    if (!fResourceProvider->explicitlyAllocateGPUResources()) {
-        fIntvlList.detachAll(); // arena allocator will clean these up for us
-        return true;
-    }
-
     SkDEBUGCODE(fAssigned = true;)
 
 #if GR_ALLOCATION_SPEW
diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp
index 622cea2..d4346cd 100644
--- a/src/gpu/GrResourceProvider.cpp
+++ b/src/gpu/GrResourceProvider.cpp
@@ -30,11 +30,9 @@
 #define ASSERT_SINGLE_OWNER \
     SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)
 
-GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner,
-                                       bool explicitlyAllocateGPUResources)
+GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
         : fCache(cache)
         , fGpu(gpu)
-        , fExplicitlyAllocateGPUResources(explicitlyAllocateGPUResources)
 #ifdef SK_DEBUG
         , fSingleOwner(owner)
 #endif
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index 1a9924b..2d4ef92 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -53,8 +53,7 @@
         kNoPendingIO     = 0x1,
     };
 
-    GrResourceProvider(GrGpu*, GrResourceCache*, GrSingleOwner*,
-                       bool explicitlyAllocateGPUResources);
+    GrResourceProvider(GrGpu*, GrResourceCache*, GrSingleOwner*);
 
     /**
      * Finds a resource in the cache, based on the specified key. Prior to calling this, the caller
@@ -252,10 +251,6 @@
     inline GrResourceProviderPriv priv();
     inline const GrResourceProviderPriv priv() const;
 
-    bool explicitlyAllocateGPUResources() const { return fExplicitlyAllocateGPUResources; }
-
-    bool testingOnly_setExplicitlyAllocateGPUResources(bool newValue);
-
 private:
     sk_sp<GrGpuResource> findResourceByUniqueKey(const GrUniqueKey&);
 
@@ -295,7 +290,6 @@
     GrGpu* fGpu;
     sk_sp<const GrCaps> fCaps;
     sk_sp<const GrGpuBuffer> fQuadIndexBuffer;
-    bool fExplicitlyAllocateGPUResources;
 
     // In debug builds we guard against improper thread handling
     SkDEBUGCODE(mutable GrSingleOwner* fSingleOwner;)
diff --git a/src/gpu/GrTextureOpList.cpp b/src/gpu/GrTextureOpList.cpp
index 23bd58e..285bb3c 100644
--- a/src/gpu/GrTextureOpList.cpp
+++ b/src/gpu/GrTextureOpList.cpp
@@ -21,11 +21,10 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-GrTextureOpList::GrTextureOpList(GrResourceProvider* resourceProvider,
-                                 sk_sp<GrOpMemoryPool> opMemoryPool,
+GrTextureOpList::GrTextureOpList(sk_sp<GrOpMemoryPool> opMemoryPool,
                                  sk_sp<GrTextureProxy> proxy,
                                  GrAuditTrail* auditTrail)
-        : INHERITED(resourceProvider, std::move(opMemoryPool), proxy, auditTrail) {
+        : INHERITED(std::move(opMemoryPool), proxy, auditTrail) {
     SkASSERT(fOpMemoryPool);
     SkASSERT(!proxy->readOnly());
 }
diff --git a/src/gpu/GrTextureOpList.h b/src/gpu/GrTextureOpList.h
index b162460..a16dd43 100644
--- a/src/gpu/GrTextureOpList.h
+++ b/src/gpu/GrTextureOpList.h
@@ -23,8 +23,7 @@
 
 class GrTextureOpList final : public GrOpList {
 public:
-    GrTextureOpList(GrResourceProvider*, sk_sp<GrOpMemoryPool>,
-                    sk_sp<GrTextureProxy>, GrAuditTrail*);
+    GrTextureOpList(sk_sp<GrOpMemoryPool>, sk_sp<GrTextureProxy>, GrAuditTrail*);
     ~GrTextureOpList() override;
 
     /**
diff --git a/src/gpu/GrTextureProxy.cpp b/src/gpu/GrTextureProxy.cpp
index b319dba..136315e 100644
--- a/src/gpu/GrTextureProxy.cpp
+++ b/src/gpu/GrTextureProxy.cpp
@@ -94,8 +94,6 @@
 }
 
 sk_sp<GrSurface> GrTextureProxy::createSurface(GrResourceProvider* resourceProvider) const {
-    SkASSERT(resourceProvider->explicitlyAllocateGPUResources());
-
     sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, 1,
                                                        /* needsStencil = */ false,
                                                        kNone_GrSurfaceFlags,
diff --git a/src/gpu/GrTextureRenderTargetProxy.cpp b/src/gpu/GrTextureRenderTargetProxy.cpp
index af53572..3c2b2c4 100644
--- a/src/gpu/GrTextureRenderTargetProxy.cpp
+++ b/src/gpu/GrTextureRenderTargetProxy.cpp
@@ -101,8 +101,6 @@
 
 sk_sp<GrSurface> GrTextureRenderTargetProxy::createSurface(
                                                     GrResourceProvider* resourceProvider) const {
-    SkASSERT(resourceProvider->explicitlyAllocateGPUResources());
-
     static constexpr GrSurfaceDescFlags kDescFlags = kRenderTarget_GrSurfaceFlag;
 
     sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, this->numStencilSamples(),
diff --git a/src/gpu/ops/GrCopySurfaceOp.cpp b/src/gpu/ops/GrCopySurfaceOp.cpp
index 548afb9..f2a5257 100644
--- a/src/gpu/ops/GrCopySurfaceOp.cpp
+++ b/src/gpu/ops/GrCopySurfaceOp.cpp
@@ -87,11 +87,7 @@
 }
 
 void GrCopySurfaceOp::onExecute(GrOpFlushState* state, const SkRect& chainBounds) {
-    if (state->resourceProvider()->explicitlyAllocateGPUResources()) {
-        SkASSERT(fSrc.get()->isInstantiated());
-    } else if (!fSrc.get()->instantiate(state->resourceProvider())) {
-        return;
-    }
+    SkASSERT(fSrc.get()->isInstantiated());
 
     state->commandBuffer()->copy(fSrc.get()->peekSurface(), fSrc.get()->origin(), fSrcRect,
                                  fDstPoint);
diff --git a/src/gpu/ops/GrTextureOp.cpp b/src/gpu/ops/GrTextureOp.cpp
index 554dfb8..dd6d70c 100644
--- a/src/gpu/ops/GrTextureOp.cpp
+++ b/src/gpu/ops/GrTextureOp.cpp
@@ -442,11 +442,7 @@
             for (unsigned p = 0; p < op.fProxyCnt; ++p) {
                 numTotalQuads += op.fProxies[p].fQuadCnt;
                 auto* proxy = op.fProxies[p].fProxy;
-                if (target->resourceProvider()->explicitlyAllocateGPUResources()) {
-                    if (!proxy->isInstantiated()) {
-                        return;
-                    }
-                } else if (!proxy->instantiate(target->resourceProvider())) {
+                if (!proxy->isInstantiated()) {
                     return;
                 }
                 SkASSERT(proxy->config() == config);