Cleanup some legacy instantiate calls

Since explicit resource allocation has stuck, these instantiate calls are no longer required.

Change-Id: I5a8a7fa714eb1e9550f4f645ce8fced2d5f7aa4e
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/222457
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 54a21ea..478a4d0 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -321,8 +321,8 @@
         while (alloc.assign(&startIndex, &stopIndex, &error)) {
             if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                 for (int i = startIndex; i < stopIndex; ++i) {
-                    if (fDAG.opList(i) && !fDAG.opList(i)->isFullyInstantiated()) {
-                        // If the backing surface wasn't allocated drop the entire opList.
+                    if (fDAG.opList(i) && !fDAG.opList(i)->isInstantiated()) {
+                        // If the backing surface wasn't allocated, drop the entire opList.
                         fDAG.removeOpList(i);
                     }
                     if (fDAG.opList(i)) {
@@ -395,12 +395,6 @@
     }
 #endif
 
-    auto direct = fContext->priv().asDirectContext();
-    if (!direct) {
-        return false;
-    }
-
-    auto resourceProvider = direct->priv().resourceProvider();
     bool anyOpListsExecuted = false;
 
     for (int i = startIndex; i < stopIndex; ++i) {
@@ -409,16 +403,9 @@
         }
 
         GrOpList* opList = fDAG.opList(i);
+        SkASSERT(opList->isInstantiated());
+        SkASSERT(opList->deferredProxiesAreInstantiated());
 
-        if (!opList->isFullyInstantiated()) {
-            // If the backing surface wasn't allocated drop the draw of the entire opList.
-            fDAG.removeOpList(i);
-            continue;
-        }
-
-        // TODO: handle this instantiation via lazy surface proxies?
-        // Instantiate all deferred proxies (being built on worker threads) so we can upload them
-        opList->instantiateDeferredProxies(resourceProvider);
         opList->prepare(flushState);
     }
 
diff --git a/src/gpu/GrFragmentProcessor.cpp b/src/gpu/GrFragmentProcessor.cpp
index 0570763..d2d9902 100644
--- a/src/gpu/GrFragmentProcessor.cpp
+++ b/src/gpu/GrFragmentProcessor.cpp
@@ -75,21 +75,23 @@
     SkDEBUGCODE(transform->setInProcessor();)
 }
 
-bool GrFragmentProcessor::instantiate(GrResourceProvider* resourceProvider) const {
+#ifdef SK_DEBUG
+bool GrFragmentProcessor::isInstantiated() const {
     for (int i = 0; i < fTextureSamplerCnt; ++i) {
-        if (!this->textureSampler(i).instantiate(resourceProvider)) {
+        if (!this->textureSampler(i).isInstantiated()) {
             return false;
         }
     }
 
     for (int i = 0; i < this->numChildProcessors(); ++i) {
-        if (!this->childProcessor(i).instantiate(resourceProvider)) {
+        if (!this->childProcessor(i).isInstantiated()) {
             return false;
         }
     }
 
     return true;
 }
+#endif
 
 int GrFragmentProcessor::registerChildProcessor(std::unique_ptr<GrFragmentProcessor> child) {
     if (child->usesLocalCoords()) {
diff --git a/src/gpu/GrFragmentProcessor.h b/src/gpu/GrFragmentProcessor.h
index 5789a1e..57e7a01 100644
--- a/src/gpu/GrFragmentProcessor.h
+++ b/src/gpu/GrFragmentProcessor.h
@@ -122,7 +122,7 @@
 
     const GrFragmentProcessor& childProcessor(int index) const { return *fChildProcessors[index]; }
 
-    bool instantiate(GrResourceProvider*) const;
+    SkDEBUGCODE(bool isInstantiated() const;)
 
     /** Do any of the coordtransforms for this processor require local coords? */
     bool usesLocalCoords() const { return SkToBool(fFlags & kUsesLocalCoords_Flag); }
@@ -434,11 +434,7 @@
 
     bool operator!=(const TextureSampler& other) const { return !(*this == other); }
 
-    // 'instantiate' should only ever be called at flush time.
-    // TODO: this can go away once explicit allocation has stuck
-    bool instantiate(GrResourceProvider* resourceProvider) const {
-        return fProxy->isInstantiated();
-    }
+    SkDEBUGCODE(bool isInstantiated() const { return fProxy->isInstantiated(); })
 
     // 'peekTexture' should only ever be called after a successful 'instantiate' call
     GrTexture* peekTexture() const {
diff --git a/src/gpu/GrGpuCommandBuffer.cpp b/src/gpu/GrGpuCommandBuffer.cpp
index 292a39a..6038520 100644
--- a/src/gpu/GrGpuCommandBuffer.cpp
+++ b/src/gpu/GrGpuCommandBuffer.cpp
@@ -49,9 +49,8 @@
     SkASSERT(!pipeline.isScissorEnabled() || fixedDynamicState ||
              (dynamicStateArrays && dynamicStateArrays->fScissorRects));
 
-    if (pipeline.isBad()) {
-        return false;
-    }
+    SkASSERT(!pipeline.isBad());
+
 #ifdef SK_DEBUG
     if (fixedDynamicState && fixedDynamicState->fPrimitiveProcessorTextures) {
         GrTextureProxy** processorProxies = fixedDynamicState->fPrimitiveProcessorTextures;
diff --git a/src/gpu/GrOpFlushState.cpp b/src/gpu/GrOpFlushState.cpp
index 5c13afe..8bff565 100644
--- a/src/gpu/GrOpFlushState.cpp
+++ b/src/gpu/GrOpFlushState.cpp
@@ -42,7 +42,6 @@
     pipelineArgs.fInputFlags = pipelineFlags;
     pipelineArgs.fDstProxy = this->dstProxy();
     pipelineArgs.fCaps = &this->caps();
-    pipelineArgs.fResourceProvider = this->resourceProvider();
     pipelineArgs.fUserStencil = stencilSettings;
     pipelineArgs.fOutputSwizzle = this->drawOpArgs().fOutputSwizzle;
     GrPipeline pipeline(pipelineArgs, std::move(processorSet), this->detachAppliedClip());
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
index 8b7bdb1..9a9ff14 100644
--- a/src/gpu/GrOpList.cpp
+++ b/src/gpu/GrOpList.cpp
@@ -43,12 +43,6 @@
     }
 }
 
-// TODO: this can go away when explicit allocation has stuck
-bool GrOpList::instantiate(GrResourceProvider* resourceProvider) {
-    SkASSERT(fTarget->isInstantiated());
-    return true;
-}
-
 void GrOpList::endFlush() {
     if (fTarget && this == fTarget->getLastOpList()) {
         fTarget->setLastOpList(nullptr);
@@ -59,11 +53,17 @@
     fAuditTrail = nullptr;
 }
 
-void GrOpList::instantiateDeferredProxies(GrResourceProvider* resourceProvider) {
+#ifdef SK_DEBUG
+bool GrOpList::deferredProxiesAreInstantiated() const {
     for (int i = 0; i < fDeferredProxies.count(); ++i) {
-        SkASSERT(fDeferredProxies[i]->isInstantiated());
+        if (!fDeferredProxies[i]->isInstantiated()) {
+            return false;
+        }
     }
+
+    return true;
 }
+#endif
 
 void GrOpList::prepare(GrOpFlushState* flushState) {
     for (int i = 0; i < fDeferredProxies.count(); ++i) {
@@ -148,8 +148,6 @@
 }
 #endif
 
-bool GrOpList::isInstantiated() const { return fTarget->isInstantiated(); }
-
 void GrOpList::closeThoseWhoDependOnMe(const GrCaps& caps) {
     for (int i = 0; i < fDependents.count(); ++i) {
         if (!fDependents[i]->isClosed()) {
@@ -158,8 +156,8 @@
     }
 }
 
-bool GrOpList::isFullyInstantiated() const {
-    if (!this->isInstantiated()) {
+bool GrOpList::isInstantiated() const {
+    if (!fTarget->isInstantiated()) {
         return false;
     }
 
diff --git a/src/gpu/GrOpList.h b/src/gpu/GrOpList.h
index 987ddb0..da2b7b9 100644
--- a/src/gpu/GrOpList.h
+++ b/src/gpu/GrOpList.h
@@ -32,10 +32,7 @@
     GrOpList(sk_sp<GrOpMemoryPool>, sk_sp<GrSurfaceProxy>, GrAuditTrail*);
     ~GrOpList() override;
 
-    // These four methods are invoked at flush time
-    bool instantiate(GrResourceProvider* resourceProvider);
-    // Instantiates any "threaded" texture proxies that are being prepared elsewhere
-    void instantiateDeferredProxies(GrResourceProvider* resourceProvider);
+    // These two methods are only invoked at flush time
     void prepare(GrOpFlushState* flushState);
     bool execute(GrOpFlushState* flushState) { return this->onExecute(flushState); }
 
@@ -74,7 +71,7 @@
     virtual GrTextureOpList* asTextureOpList() { return nullptr; }
 
     /*
-     * Safely case this GrOpList to a GrRenderTargetOpList (if possible).
+     * Safely cast this GrOpList to a GrRenderTargetOpList (if possible).
      */
     virtual GrRenderTargetOpList* asRenderTargetOpList() { return nullptr; }
 
@@ -88,11 +85,11 @@
     SkDEBUGCODE(virtual int numClips() const { return 0; })
 
 protected:
-    bool isInstantiated() const;
-
     // In addition to just the GrSurface being allocated, has the stencil buffer been allocated (if
     // it is required)?
-    bool isFullyInstantiated() const;
+    bool isInstantiated() const;
+
+    SkDEBUGCODE(bool deferredProxiesAreInstantiated() const;)
 
     // This is a backpointer to the GrOpMemoryPool that holds the memory for this opLists' ops.
     // In the DDL case, these back pointers keep the DDL's GrOpMemoryPool alive as long as its
@@ -106,6 +103,8 @@
     GrLoadOp              fStencilLoadOp  = GrLoadOp::kLoad;
 
     // List of texture proxies whose contents are being prepared on a worker thread
+    // TODO: this list exists so we can fire off the proper upload when an opList begins
+    // executing. Can this be replaced?
     SkTArray<GrTextureProxy*, true> fDeferredProxies;
 
 private:
@@ -181,7 +180,7 @@
     virtual void onPrepare(GrOpFlushState* flushState) = 0;
     virtual bool onExecute(GrOpFlushState* flushState) = 0;
 
-    uint32_t               fUniqueID;
+    const uint32_t         fUniqueID;
     uint32_t               fFlags;
 
     // 'this' GrOpList relies on the output of the GrOpLists in 'fDependencies'
diff --git a/src/gpu/GrPipeline.cpp b/src/gpu/GrPipeline.cpp
index 9a338f3..becb9e4 100644
--- a/src/gpu/GrPipeline.cpp
+++ b/src/gpu/GrPipeline.cpp
@@ -52,25 +52,26 @@
                              processors.numCoverageFragmentProcessors() +
                              appliedClip.numClipCoverageFragmentProcessors();
     fFragmentProcessors.reset(numTotalProcessors);
+
     int currFPIdx = 0;
     for (int i = 0; i < processors.numColorFragmentProcessors(); ++i, ++currFPIdx) {
         fFragmentProcessors[currFPIdx] = processors.detachColorFragmentProcessor(i);
-        if (!fFragmentProcessors[currFPIdx]->instantiate(args.fResourceProvider)) {
-            this->markAsBad();
-        }
     }
     for (int i = 0; i < processors.numCoverageFragmentProcessors(); ++i, ++currFPIdx) {
         fFragmentProcessors[currFPIdx] = processors.detachCoverageFragmentProcessor(i);
-        if (!fFragmentProcessors[currFPIdx]->instantiate(args.fResourceProvider)) {
-            this->markAsBad();
-        }
     }
     for (int i = 0; i < appliedClip.numClipCoverageFragmentProcessors(); ++i, ++currFPIdx) {
         fFragmentProcessors[currFPIdx] = appliedClip.detachClipCoverageFragmentProcessor(i);
-        if (!fFragmentProcessors[currFPIdx]->instantiate(args.fResourceProvider)) {
+    }
+
+#ifdef SK_DEBUG
+    for (int i = 0; i < numTotalProcessors; ++i) {
+        if (!fFragmentProcessors[i]->isInstantiated()) {
             this->markAsBad();
+            break;
         }
     }
+#endif
 }
 
 void GrPipeline::addDependenciesTo(GrOpList* opList, const GrCaps& caps) const {
diff --git a/src/gpu/GrPipeline.h b/src/gpu/GrPipeline.h
index de68e7f..c9c9419 100644
--- a/src/gpu/GrPipeline.h
+++ b/src/gpu/GrPipeline.h
@@ -59,7 +59,6 @@
         InputFlags fInputFlags = InputFlags::kNone;
         const GrUserStencilSettings* fUserStencil = &GrUserStencilSettings::kUnused;
         const GrCaps* fCaps = nullptr;
-        GrResourceProvider* fResourceProvider = nullptr;
         GrXferProcessor::DstProxy fDstProxy;
         GrSwizzle fOutputSwizzle;
     };
@@ -184,7 +183,7 @@
     bool isStencilEnabled() const {
         return SkToBool(fFlags & Flags::kStencilEnabled);
     }
-    bool isBad() const { return SkToBool(fFlags & Flags::kIsBad); }
+    SkDEBUGCODE(bool isBad() const { return SkToBool(fFlags & Flags::kIsBad); })
 
     GrXferBarrierType xferBarrierType(GrTexture*, const GrCaps&) const;
 
@@ -194,7 +193,8 @@
     const GrSwizzle& outputSwizzle() const { return fOutputSwizzle; }
 
 private:
-    void markAsBad() { fFlags |= Flags::kIsBad; }
+
+    SkDEBUGCODE(void markAsBad() { fFlags |= Flags::kIsBad; })
 
     static constexpr uint8_t kLastInputFlag = (uint8_t)InputFlags::kSnapVerticesToPixelCenters;
 
@@ -203,7 +203,9 @@
         kHasStencilClip = (kLastInputFlag << 1),
         kStencilEnabled = (kLastInputFlag << 2),
         kScissorEnabled = (kLastInputFlag << 3),
+#ifdef SK_DEBUG
         kIsBad = (kLastInputFlag << 4),
+#endif
     };
 
     GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(Flags);
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index 67c80c6..4c83509 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -154,6 +154,8 @@
                     GrSurfaceProxy::LazyInstantiationType::kDeinstantiate) {
                     fDeinstantiateTracker->addProxy(proxy);
                 }
+            } else {
+                fLazyInstantiationError = true;
             }
         }
     }
@@ -378,7 +380,8 @@
 
 bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
     SkASSERT(outError);
-    *outError = AssignError::kNoError;
+    *outError = fLazyInstantiationError ? AssignError::kFailedProxyInstantiation
+                                        : AssignError::kNoError;
 
     SkASSERT(fNumOpLists == fEndOfOpListOpIndices.count());
 
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
index a8c4f58..fae6657 100644
--- a/src/gpu/GrResourceAllocator.h
+++ b/src/gpu/GrResourceAllocator.h
@@ -39,6 +39,32 @@
  *
  * Note: the op indices (used in the usage intervals) come from the order of the ops in
  * their opLists after the opList DAG has been linearized.
+ *
+ *************************************************************************************************
+ * How does instantiation failure handling work when explicitly allocating?
+ *
+ * In the gather usage intervals pass all the GrSurfaceProxies used in the flush should be
+ * gathered (i.e., in GrOpList::gatherProxyIntervals).
+ *
+ * The allocator will churn through this list but could fail anywhere.
+ *
+ * Allocation failure handling occurs at two levels:
+ *
+ * 1) If the GrSurface backing an opList fails to allocate then the entire opList is dropped.
+ *
+ * 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped
+ * (via GrOpList::purgeOpsWithUninstantiatedProxies)
+ *
+ * The pass to determine which ops to drop is a bit laborious so we only check the opLists and
+ * individual ops when something goes wrong in allocation (i.e., when the return code from
+ * GrResourceAllocator::assign is bad)
+ *
+ * Altogether, this means we should never attempt to draw an op which is missing some
+ * required GrSurface.
+ *
+ * One wrinkle in this plan is that promise images are fulfilled during the gather interval pass.
+ * If any of the promise images fail at this stage then the allocator is set into an error
+ * state and all allocations are then scanned for failures during the main allocation pass.
  */
 class GrResourceAllocator {
 public:
@@ -252,6 +278,7 @@
     char                         fStorage[kInitialArenaSize];
     SkArenaAlloc                 fIntervalAllocator{fStorage, kInitialArenaSize, kInitialArenaSize};
     Interval*                    fFreeIntervalList = nullptr;
+    bool                         fLazyInstantiationError = false;
 };
 
 #endif // GrResourceAllocator_DEFINED
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 189081f..b8c5a01 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -410,7 +410,6 @@
 
     GrPipeline::InitArgs initArgs;
     initArgs.fCaps = &flushState->caps();
-    initArgs.fResourceProvider = flushState->resourceProvider();
     initArgs.fDstProxy = flushState->drawOpArgs().fDstProxy;
     initArgs.fOutputSwizzle = flushState->drawOpArgs().fOutputSwizzle;
     auto clip = flushState->detachAppliedClip();
diff --git a/src/gpu/ops/GrAtlasTextOp.cpp b/src/gpu/ops/GrAtlasTextOp.cpp
index 646c9ea..998847d 100644
--- a/src/gpu/ops/GrAtlasTextOp.cpp
+++ b/src/gpu/ops/GrAtlasTextOp.cpp
@@ -328,7 +328,7 @@
 
     void* vertices = target->makeVertexSpace(vertexStride, glyphCount * kVerticesPerGlyph,
                                              &flushInfo.fVertexBuffer, &flushInfo.fVertexOffset);
-    flushInfo.fIndexBuffer = target->resourceProvider()->refQuadIndexBuffer();
+    flushInfo.fIndexBuffer = resourceProvider->refQuadIndexBuffer();
     if (!vertices || !flushInfo.fVertexBuffer) {
         SkDebugf("Could not allocate vertices\n");
         return;
diff --git a/src/gpu/ops/GrDrawPathOp.cpp b/src/gpu/ops/GrDrawPathOp.cpp
index 57f4546..613b8a1 100644
--- a/src/gpu/ops/GrDrawPathOp.cpp
+++ b/src/gpu/ops/GrDrawPathOp.cpp
@@ -49,7 +49,6 @@
     }
     args.fUserStencil = &kCoverPass;
     args.fCaps = &state.caps();
-    args.fResourceProvider = state.resourceProvider();
     args.fDstProxy = state.drawOpArgs().fDstProxy;
     args.fOutputSwizzle = state.drawOpArgs().fOutputSwizzle;
     return args;
diff --git a/src/gpu/ops/GrFillRRectOp.cpp b/src/gpu/ops/GrFillRRectOp.cpp
index 82e286b..d3f810e 100644
--- a/src/gpu/ops/GrFillRRectOp.cpp
+++ b/src/gpu/ops/GrFillRRectOp.cpp
@@ -744,7 +744,6 @@
         initArgs.fInputFlags = GrPipeline::InputFlags::kHWAntialias;
     }
     initArgs.fCaps = &flushState->caps();
-    initArgs.fResourceProvider = flushState->resourceProvider();
     initArgs.fDstProxy = flushState->drawOpArgs().fDstProxy;
     initArgs.fOutputSwizzle = flushState->drawOpArgs().fOutputSwizzle;
     auto clip = flushState->detachAppliedClip();