Extract a GrRenderTask base class from GrOpList

This is a more abstract representation of something that can
participate in the DAG by modifying a GrSurfaceProxy's pixels. This
will serve as the direct base class for future DAG participants that
aren't opLists, and help to eventually remove GrTextureOpList.
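
For illustration only: a future DAG participant that isn't an opList
would derive from GrRenderTask and override its virtuals. A minimal
sketch ("GrMipmapRenderTask" and everything in its body are
hypothetical, not part of this CL):

    class GrMipmapRenderTask : public GrRenderTask {
    public:
        GrMipmapRenderTask(sk_sp<GrSurfaceProxy> target)
                : GrRenderTask(std::move(target)) {}

    private:
        // Nothing to purge; the drawing manager already drops the whole
        // task if fTarget itself fails to instantiate.
        void handleInternalAllocationFailure() override {}
        bool onIsUsed(GrSurfaceProxy*) const override { return false; }
        void gatherProxyIntervals(GrResourceAllocator*) const override {
            // Would declare fTarget's usage interval to the allocator.
        }
        void onPrepare(GrOpFlushState*) override {}
        bool onExecute(GrOpFlushState*) override {
            // Would record GPU work against fTarget here (e.g. rebuild
            // its mip levels), returning whether anything was executed.
            return true;
        }
    };

Such a task would be added to the drawing manager's RenderTaskDAG and
flow through the same prepare/execute path as the existing opLists.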

Bug: skia:
Change-Id: Ia903c0188e0f6a6602781b90ee8da87ba3cc13f8
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/231277
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
diff --git a/src/gpu/GrContextPriv.cpp b/src/gpu/GrContextPriv.cpp
index 59d726f..4778c98 100644
--- a/src/gpu/GrContextPriv.cpp
+++ b/src/gpu/GrContextPriv.cpp
@@ -213,13 +213,13 @@
     this->flushSurfaces(proxy ? &proxy : nullptr, proxy ? 1 : 0, {});
 }
 
-void GrContextPriv::moveOpListsToDDL(SkDeferredDisplayList* ddl) {
-    fContext->drawingManager()->moveOpListsToDDL(ddl);
+void GrContextPriv::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
+    fContext->drawingManager()->moveRenderTasksToDDL(ddl);
 }
 
-void GrContextPriv::copyOpListsFromDDL(const SkDeferredDisplayList* ddl,
-                                       GrRenderTargetProxy* newDest) {
-    fContext->drawingManager()->copyOpListsFromDDL(ddl, newDest);
+void GrContextPriv::copyRenderTasksFromDDL(const SkDeferredDisplayList* ddl,
+                                           GrRenderTargetProxy* newDest) {
+    fContext->drawingManager()->copyRenderTasksFromDDL(ddl, newDest);
 }
 
 //////////////////////////////////////////////////////////////////////////////
diff --git a/src/gpu/GrContextPriv.h b/src/gpu/GrContextPriv.h
index 0e129b0..112edac 100644
--- a/src/gpu/GrContextPriv.h
+++ b/src/gpu/GrContextPriv.h
@@ -219,8 +219,8 @@
         return fContext->onGetAtlasManager();
     }
 
-    void moveOpListsToDDL(SkDeferredDisplayList*);
-    void copyOpListsFromDDL(const SkDeferredDisplayList*, GrRenderTargetProxy* newDest);
+    void moveRenderTasksToDDL(SkDeferredDisplayList*);
+    void copyRenderTasksFromDDL(const SkDeferredDisplayList*, GrRenderTargetProxy* newDest);
 
     GrContextOptions::PersistentCache* getPersistentCache() { return fContext->fPersistentCache; }
     GrContextOptions::ShaderErrorHandler* getShaderErrorHandler() const {
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 4c83022..e52df18 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -17,10 +17,10 @@
 #include "src/gpu/GrGpu.h"
 #include "src/gpu/GrMemoryPool.h"
 #include "src/gpu/GrOnFlushResourceProvider.h"
-#include "src/gpu/GrOpList.h"
 #include "src/gpu/GrRecordingContextPriv.h"
 #include "src/gpu/GrRenderTargetContext.h"
 #include "src/gpu/GrRenderTargetProxy.h"
+#include "src/gpu/GrRenderTask.h"
 #include "src/gpu/GrResourceAllocator.h"
 #include "src/gpu/GrResourceProvider.h"
 #include "src/gpu/GrSoftwarePathRenderer.h"
@@ -35,44 +35,45 @@
 #include "src/gpu/text/GrTextContext.h"
 #include "src/image/SkSurface_Gpu.h"
 
-GrDrawingManager::OpListDAG::OpListDAG(bool sortOpLists) : fSortOpLists(sortOpLists) {}
+GrDrawingManager::RenderTaskDAG::RenderTaskDAG(bool sortRenderTasks)
+        : fSortRenderTasks(sortRenderTasks) {}
 
-GrDrawingManager::OpListDAG::~OpListDAG() {}
+GrDrawingManager::RenderTaskDAG::~RenderTaskDAG() {}
 
-void GrDrawingManager::OpListDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
-    idArray->reset(fOpLists.count());
-    for (int i = 0; i < fOpLists.count(); ++i) {
-        if (fOpLists[i]) {
-            (*idArray)[i] = fOpLists[i]->uniqueID();
+void GrDrawingManager::RenderTaskDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
+    idArray->reset(fRenderTasks.count());
+    for (int i = 0; i < fRenderTasks.count(); ++i) {
+        if (fRenderTasks[i]) {
+            (*idArray)[i] = fRenderTasks[i]->uniqueID();
         }
     }
 }
 
-void GrDrawingManager::OpListDAG::reset() {
-    fOpLists.reset();
+void GrDrawingManager::RenderTaskDAG::reset() {
+    fRenderTasks.reset();
 }
 
-void GrDrawingManager::OpListDAG::removeOpList(int index) {
-    if (!fOpLists[index]->unique()) {
+void GrDrawingManager::RenderTaskDAG::removeRenderTask(int index) {
+    if (!fRenderTasks[index]->unique()) {
         // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111
-        fOpLists[index]->endFlush();
+        fRenderTasks[index]->endFlush();
     }
 
-    fOpLists[index] = nullptr;
+    fRenderTasks[index] = nullptr;
 }
 
-void GrDrawingManager::OpListDAG::removeOpLists(int startIndex, int stopIndex) {
+void GrDrawingManager::RenderTaskDAG::removeRenderTasks(int startIndex, int stopIndex) {
     for (int i = startIndex; i < stopIndex; ++i) {
-        if (!fOpLists[i]) {
+        if (!fRenderTasks[i]) {
             continue;
         }
-        this->removeOpList(i);
+        this->removeRenderTask(i);
     }
 }
 
-bool GrDrawingManager::OpListDAG::isUsed(GrSurfaceProxy* proxy) const {
-    for (int i = 0; i < fOpLists.count(); ++i) {
-        if (fOpLists[i] && fOpLists[i]->isUsed(proxy)) {
+bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const {
+    for (int i = 0; i < fRenderTasks.count(); ++i) {
+        if (fRenderTasks[i] && fRenderTasks[i]->isUsed(proxy)) {
             return true;
         }
     }
@@ -80,32 +81,33 @@
     return false;
 }
 
-void GrDrawingManager::OpListDAG::add(sk_sp<GrOpList> opList) {
-    fOpLists.emplace_back(std::move(opList));
+void GrDrawingManager::RenderTaskDAG::add(sk_sp<GrRenderTask> renderTask) {
+    fRenderTasks.emplace_back(std::move(renderTask));
 }
 
-void GrDrawingManager::OpListDAG::add(const SkTArray<sk_sp<GrOpList>>& opLists) {
-    fOpLists.push_back_n(opLists.count(), opLists.begin());
+void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& renderTasks) {
+    fRenderTasks.push_back_n(renderTasks.count(), renderTasks.begin());
 }
 
-void GrDrawingManager::OpListDAG::swap(SkTArray<sk_sp<GrOpList>>* opLists) {
+void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* opLists) {
     SkASSERT(opLists->empty());
-    opLists->swap(fOpLists);
+    opLists->swap(fRenderTasks);
 }
 
-void GrDrawingManager::OpListDAG::prepForFlush() {
-    if (fSortOpLists) {
-        SkDEBUGCODE(bool result =) SkTTopoSort<GrOpList, GrOpList::TopoSortTraits>(&fOpLists);
+void GrDrawingManager::RenderTaskDAG::prepForFlush() {
+    if (fSortRenderTasks) {
+        SkDEBUGCODE(bool result =) SkTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(
+                &fRenderTasks);
         SkASSERT(result);
     }
 
 #ifdef SK_DEBUG
     // This block checks for any unnecessary splits in the opLists. If two sequential opLists
     // share the same backing GrSurfaceProxy it means the opList was artificially split.
-    if (fOpLists.count()) {
-        GrRenderTargetOpList* prevOpList = fOpLists[0]->asRenderTargetOpList();
-        for (int i = 1; i < fOpLists.count(); ++i) {
-            GrRenderTargetOpList* curOpList = fOpLists[i]->asRenderTargetOpList();
+    if (fRenderTasks.count()) {
+        GrRenderTargetOpList* prevOpList = fRenderTasks[0]->asRenderTargetOpList();
+        for (int i = 1; i < fRenderTasks.count(); ++i) {
+            GrRenderTargetOpList* curOpList = fRenderTasks[i]->asRenderTargetOpList();
 
             if (prevOpList && curOpList) {
                 SkASSERT(prevOpList->fTarget.get() != curOpList->fTarget.get());
@@ -117,46 +119,46 @@
 #endif
 }
 
-void GrDrawingManager::OpListDAG::closeAll(const GrCaps* caps) {
-    for (int i = 0; i < fOpLists.count(); ++i) {
-        if (fOpLists[i]) {
-            fOpLists[i]->makeClosed(*caps);
+void GrDrawingManager::RenderTaskDAG::closeAll(const GrCaps* caps) {
+    for (int i = 0; i < fRenderTasks.count(); ++i) {
+        if (fRenderTasks[i]) {
+            fRenderTasks[i]->makeClosed(*caps);
         }
     }
 }
 
-void GrDrawingManager::OpListDAG::cleanup(const GrCaps* caps) {
-    for (int i = 0; i < fOpLists.count(); ++i) {
-        if (!fOpLists[i]) {
+void GrDrawingManager::RenderTaskDAG::cleanup(const GrCaps* caps) {
+    for (int i = 0; i < fRenderTasks.count(); ++i) {
+        if (!fRenderTasks[i]) {
             continue;
         }
 
-        // no opList should receive a new command after this
-        fOpLists[i]->makeClosed(*caps);
+        // no renderTask should receive a new command after this
+        fRenderTasks[i]->makeClosed(*caps);
 
         // We shouldn't need to do this, but it turns out some clients still hold onto opLists
         // after a cleanup.
         // MDB TODO: is this still true?
-        if (!fOpLists[i]->unique()) {
+        if (!fRenderTasks[i]->unique()) {
             // TODO: Eventually this should be guaranteed unique.
             // https://bugs.chromium.org/p/skia/issues/detail?id=7111
-            fOpLists[i]->endFlush();
+            fRenderTasks[i]->endFlush();
         }
     }
 
-    fOpLists.reset();
+    fRenderTasks.reset();
 }
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////
 GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
                                    const GrPathRendererChain::Options& optionsForPathRendererChain,
                                    const GrTextContext::Options& optionsForTextContext,
-                                   bool sortOpLists,
+                                   bool sortRenderTasks,
                                    bool reduceOpListSplitting)
         : fContext(context)
         , fOptionsForPathRendererChain(optionsForPathRendererChain)
         , fOptionsForTextContext(optionsForTextContext)
-        , fDAG(sortOpLists)
+        , fDAG(sortRenderTasks)
         , fTextContext(nullptr)
         , fPathRendererChain(nullptr)
         , fSoftwarePathRenderer(nullptr)
@@ -243,10 +245,10 @@
     auto resourceProvider = direct->priv().resourceProvider();
     auto resourceCache = direct->priv().getResourceCache();
 
-    // Semi-usually the GrOpLists are already closed at this point, but sometimes Ganesh
-    // needs to flush mid-draw. In that case, the SkGpuDevice's GrOpLists won't be closed
-    // but need to be flushed anyway. Closing such GrOpLists here will mean new
-    // GrOpLists will be created to replace them if the SkGpuDevice(s) write to them again.
+    // Semi-usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
+    // to flush mid-draw. In that case, the SkGpuDevice's opLists won't be closed but need to be
+    // flushed anyway. Closing such opLists here will mean new ones will be created to replace them
+    // if the SkGpuDevice(s) write to them again.
     fDAG.closeAll(fContext->priv().caps());
     fActiveOpList = nullptr;
 
@@ -267,13 +269,12 @@
 
     // Prepare any onFlush op lists (e.g. atlases).
     if (!fOnFlushCBObjects.empty()) {
-        fDAG.gatherIDs(&fFlushingOpListIDs);
+        fDAG.gatherIDs(&fFlushingRenderTaskIDs);
 
         SkSTArray<4, sk_sp<GrRenderTargetContext>> renderTargetContexts;
         for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
-            onFlushCBObject->preFlush(&onFlushProvider,
-                                      fFlushingOpListIDs.begin(), fFlushingOpListIDs.count(),
-                                      &renderTargetContexts);
+            onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
+                                      fFlushingRenderTaskIDs.count(), &renderTargetContexts);
             for (const sk_sp<GrRenderTargetContext>& rtc : renderTargetContexts) {
                 sk_sp<GrRenderTargetOpList> onFlushOpList = sk_ref_sp(rtc->getRTOpList());
                 if (!onFlushOpList) {
@@ -297,8 +298,8 @@
 
 #if 0
     // Enable this to print out verbose GrOp information
-    for (int i = 0; i < fOpLists.count(); ++i) {
-        SkDEBUGCODE(fOpLists[i]->dump();)
+    for (int i = 0; i < fRenderTasks.count(); ++i) {
+        SkDEBUGCODE(fRenderTasks[i]->dump();)
     }
 #endif
 
@@ -307,42 +308,46 @@
 
     {
         GrResourceAllocator alloc(resourceProvider, flushState.deinstantiateProxyTracker()
-                                  SkDEBUGCODE(, fDAG.numOpLists()));
-        for (int i = 0; i < fDAG.numOpLists(); ++i) {
-            if (fDAG.opList(i)) {
-                fDAG.opList(i)->gatherProxyIntervals(&alloc);
+                                  SkDEBUGCODE(, fDAG.numRenderTasks()));
+        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
+            if (fDAG.renderTask(i)) {
+                fDAG.renderTask(i)->gatherProxyIntervals(&alloc);
             }
             alloc.markEndOfOpList(i);
         }
         alloc.determineRecyclability();
 
         GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
-        int numOpListsExecuted = 0;
+        int numRenderTasksExecuted = 0;
         while (alloc.assign(&startIndex, &stopIndex, &error)) {
             if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                 for (int i = startIndex; i < stopIndex; ++i) {
-                    if (fDAG.opList(i) && !fDAG.opList(i)->isInstantiated()) {
-                        // If the backing surface wasn't allocated, drop the entire opList.
-                        fDAG.removeOpList(i);
+                    GrRenderTask* renderTask = fDAG.renderTask(i);
+                    if (!renderTask) {
+                        continue;
                     }
-                    if (fDAG.opList(i)) {
-                        fDAG.opList(i)->purgeOpsWithUninstantiatedProxies();
+                    if (!renderTask->isInstantiated()) {
+                        // If the backing surface wasn't allocated, drop the entire renderTask.
+                        fDAG.removeRenderTask(i);
+                        continue;
                     }
+                    renderTask->handleInternalAllocationFailure();
                 }
             }
 
-            if (this->executeOpLists(startIndex, stopIndex, &flushState, &numOpListsExecuted)) {
+            if (this->executeRenderTasks(
+                    startIndex, stopIndex, &flushState, &numRenderTasksExecuted)) {
                 flushed = true;
             }
         }
     }
 
 #ifdef SK_DEBUG
-    for (int i = 0; i < fDAG.numOpLists(); ++i) {
+    for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
         // If there are any remaining opLists at this point, make sure they will not survive the
         // flush. Otherwise we need to call endFlush() on them.
         // http://skbug.com/7111
-        SkASSERT(!fDAG.opList(i) || fDAG.opList(i)->unique());
+        SkASSERT(!fDAG.renderTask(i) || fDAG.renderTask(i)->unique());
     }
 #endif
     fDAG.reset();
@@ -368,45 +373,45 @@
         flushed = false;
     }
     for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
-        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingOpListIDs.begin(),
-                                   fFlushingOpListIDs.count());
+        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingRenderTaskIDs.begin(),
+                                   fFlushingRenderTaskIDs.count());
         flushed = true;
     }
     if (flushed) {
         resourceCache->purgeAsNeeded();
     }
-    fFlushingOpListIDs.reset();
+    fFlushingRenderTaskIDs.reset();
     fFlushing = false;
 
     return result;
 }
 
-bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushState* flushState,
-                                      int* numOpListsExecuted) {
-    SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numOpLists());
+bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState* flushState,
+                                          int* numRenderTasksExecuted) {
+    SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks());
 
 #if GR_FLUSH_TIME_OP_SPEW
     SkDebugf("Flushing opLists: %d to %d out of [%d, %d]\n",
-                            startIndex, stopIndex, 0, fDAG.numOpLists());
+                            startIndex, stopIndex, 0, fDAG.numRenderTasks());
     for (int i = startIndex; i < stopIndex; ++i) {
-        if (fDAG.opList(i)) {
-            fDAG.opList(i)->dump(true);
+        if (fDAG.renderTask(i)) {
+            fDAG.renderTask(i)->dump(true);
         }
     }
 #endif
 
-    bool anyOpListsExecuted = false;
+    bool anyRenderTasksExecuted = false;
 
     for (int i = startIndex; i < stopIndex; ++i) {
-        if (!fDAG.opList(i)) {
+        if (!fDAG.renderTask(i)) {
              continue;
         }
 
-        GrOpList* opList = fDAG.opList(i);
-        SkASSERT(opList->isInstantiated());
-        SkASSERT(opList->deferredProxiesAreInstantiated());
+        GrRenderTask* renderTask = fDAG.renderTask(i);
+        SkASSERT(renderTask->isInstantiated());
+        SkASSERT(renderTask->deferredProxiesAreInstantiated());
 
-        opList->prepare(flushState);
+        renderTask->prepare(flushState);
     }
 
     // Upload all data to the GPU
@@ -417,7 +422,7 @@
     // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
     // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
     // memory pressure.
-    static constexpr int kMaxOpListsBeforeFlush = 100;
+    static constexpr int kMaxRenderTasksBeforeFlush = 100;
 
     // Execute the onFlush op lists first, if any.
     for (sk_sp<GrOpList>& onFlushOpList : fOnFlushCBOpLists) {
@@ -426,43 +431,43 @@
         }
         SkASSERT(onFlushOpList->unique());
         onFlushOpList = nullptr;
-        (*numOpListsExecuted)++;
-        if (*numOpListsExecuted >= kMaxOpListsBeforeFlush) {
+        (*numRenderTasksExecuted)++;
+        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
             flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
                                            GrFlushInfo(), GrPrepareForExternalIORequests());
-            *numOpListsExecuted = 0;
+            *numRenderTasksExecuted = 0;
         }
     }
     fOnFlushCBOpLists.reset();
 
     // Execute the normal op lists.
     for (int i = startIndex; i < stopIndex; ++i) {
-        if (!fDAG.opList(i)) {
+        if (!fDAG.renderTask(i)) {
             continue;
         }
 
-        if (fDAG.opList(i)->execute(flushState)) {
-            anyOpListsExecuted = true;
+        if (fDAG.renderTask(i)->execute(flushState)) {
+            anyRenderTasksExecuted = true;
         }
-        (*numOpListsExecuted)++;
-        if (*numOpListsExecuted >= kMaxOpListsBeforeFlush) {
+        (*numRenderTasksExecuted)++;
+        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
             flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
                                            GrFlushInfo(), GrPrepareForExternalIORequests());
-            *numOpListsExecuted = 0;
+            *numRenderTasksExecuted = 0;
         }
     }
 
     SkASSERT(!flushState->commandBuffer());
     SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());
 
-    // We reset the flush state before the OpLists so that the last resources to be freed are those
-    // that are written to in the OpLists. This helps to make sure the most recently used resources
-    // are the last to be purged by the resource cache.
+    // We reset the flush state before the RenderTasks so that the last resources to be freed are
+    // those that are written to in the RenderTasks. This helps to make sure the most recently used
+    // resources are the last to be purged by the resource cache.
     flushState->reset();
 
-    fDAG.removeOpLists(startIndex, stopIndex);
+    fDAG.removeRenderTasks(startIndex, stopIndex);
 
-    return anyOpListsExecuted;
+    return anyRenderTasksExecuted;
 }
 
 GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(GrSurfaceProxy* proxies[], int numProxies,
@@ -526,14 +531,14 @@
 }
 #endif
 
-void GrDrawingManager::moveOpListsToDDL(SkDeferredDisplayList* ddl) {
+void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
     SkDEBUGCODE(this->validate());
 
-    // no opList should receive a new command after this
+    // no renderTask should receive a new command after this
     fDAG.closeAll(fContext->priv().caps());
     fActiveOpList = nullptr;
 
-    fDAG.swap(&ddl->fOpLists);
+    fDAG.swap(&ddl->fRenderTasks);
 
     if (fPathRendererChain) {
         if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
@@ -544,7 +549,7 @@
     SkDEBUGCODE(this->validate());
 }
 
-void GrDrawingManager::copyOpListsFromDDL(const SkDeferredDisplayList* ddl,
+void GrDrawingManager::copyRenderTasksFromDDL(const SkDeferredDisplayList* ddl,
-                                          GrRenderTargetProxy* newDest) {
+                                              GrRenderTargetProxy* newDest) {
     SkDEBUGCODE(this->validate());
 
@@ -569,14 +574,14 @@
         ccpr->mergePendingPaths(ddl->fPendingPaths);
     }
 
-    fDAG.add(ddl->fOpLists);
+    fDAG.add(ddl->fRenderTasks);
 
     SkDEBUGCODE(this->validate());
 }
 
 #ifdef SK_DEBUG
 void GrDrawingManager::validate() const {
-    if (fDAG.sortingOpLists() && fReduceOpListSplitting) {
+    if (fDAG.sortingRenderTasks() && fReduceOpListSplitting) {
         SkASSERT(!fActiveOpList);
     } else {
         if (fActiveOpList) {
@@ -585,9 +590,9 @@
             SkASSERT(fActiveOpList == fDAG.back());
         }
 
-        for (int i = 0; i < fDAG.numOpLists(); ++i) {
-            if (fActiveOpList != fDAG.opList(i)) {
-                SkASSERT(fDAG.opList(i)->isClosed());
+        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
+            if (fActiveOpList != fDAG.renderTask(i)) {
+                SkASSERT(fDAG.renderTask(i)->isClosed());
             }
         }
 
@@ -603,14 +608,14 @@
     SkDEBUGCODE(this->validate());
     SkASSERT(fContext);
 
-    if (fDAG.sortingOpLists() && fReduceOpListSplitting) {
+    if (fDAG.sortingRenderTasks() && fReduceOpListSplitting) {
         // In this case we need to close all the opLists that rely on the current contents of
         // 'rtp'. That is bc we're going to update the content of the proxy so they need to be
         // split in case they use both the old and new content. (This is a bit of an overkill:
         // they really only need to be split if they ever reference proxy's contents again but
         // that is hard to predict/handle).
-        if (GrOpList* lastOpList = rtp->getLastOpList()) {
-            lastOpList->closeThoseWhoDependOnMe(*fContext->priv().caps());
+        if (GrRenderTask* lastRenderTask = rtp->getLastRenderTask()) {
+            lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps());
         }
     } else if (fActiveOpList) {
         // This is  a temporary fix for the partial-MDB world. In that world we're not
@@ -625,12 +630,12 @@
                                                         fContext->priv().refOpMemoryPool(),
                                                         rtp,
                                                         fContext->priv().auditTrail()));
-    SkASSERT(rtp->getLastOpList() == opList.get());
+    SkASSERT(rtp->getLastRenderTask() == opList.get());
 
     if (managedOpList) {
         fDAG.add(opList);
 
-        if (!fDAG.sortingOpLists() || !fReduceOpListSplitting) {
+        if (!fDAG.sortingRenderTasks() || !fReduceOpListSplitting) {
             fActiveOpList = opList.get();
         }
     }
@@ -643,14 +648,14 @@
     SkDEBUGCODE(this->validate());
     SkASSERT(fContext);
 
-    if (fDAG.sortingOpLists() && fReduceOpListSplitting) {
+    if (fDAG.sortingRenderTasks() && fReduceOpListSplitting) {
         // In this case we need to close all the opLists that rely on the current contents of
         // 'texture'. That is bc we're going to update the content of the proxy so they need to
         // be split in case they use both the old and new content. (This is a bit of an
         // overkill: they really only need to be split if they ever reference proxy's contents
         // again but that is hard to predict/handle).
-        if (GrOpList* lastOpList = textureProxy->getLastOpList()) {
-            lastOpList->closeThoseWhoDependOnMe(*fContext->priv().caps());
+        if (GrRenderTask* lastRenderTask = textureProxy->getLastRenderTask()) {
+            lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps());
         }
     } else if (fActiveOpList) {
         // This is  a temporary fix for the partial-MDB world. In that world we're not
@@ -665,10 +670,10 @@
                                                       textureProxy,
                                                       fContext->priv().auditTrail()));
 
-    SkASSERT(textureProxy->getLastOpList() == opList.get());
+    SkASSERT(textureProxy->getLastRenderTask() == opList.get());
 
     fDAG.add(opList);
-    if (!fDAG.sortingOpLists() || !fReduceOpListSplitting) {
+    if (!fDAG.sortingRenderTasks() || !fReduceOpListSplitting) {
         fActiveOpList = opList.get();
     }
 
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index 7a87ffc..6e3b22a 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -21,6 +21,7 @@
 class GrCoverageCountingPathRenderer;
 class GrOnFlushCallbackObject;
 class GrOpFlushState;
+class GrOpList;
 class GrRecordingContext;
 class GrRenderTargetContext;
 class GrRenderTargetProxy;
@@ -30,11 +31,6 @@
 class GrTextureOpList;
 class SkDeferredDisplayList;
 
-// The GrDrawingManager allocates a new GrRenderTargetContext for each GrRenderTarget
-// but all of them still land in the same GrOpList!
-//
-// In the future this class will allocate a new GrRenderTargetContext for
-// each GrRenderTarget/GrOpList and manage the DAG.
 class GrDrawingManager {
 public:
     ~GrDrawingManager();
@@ -91,15 +87,16 @@
     void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
 #endif
 
-    void moveOpListsToDDL(SkDeferredDisplayList* ddl);
-    void copyOpListsFromDDL(const SkDeferredDisplayList*, GrRenderTargetProxy* newDest);
+    void moveRenderTasksToDDL(SkDeferredDisplayList* ddl);
+    void copyRenderTasksFromDDL(const SkDeferredDisplayList*, GrRenderTargetProxy* newDest);
 
 private:
-    // This class encapsulates maintenance and manipulation of the drawing manager's DAG of opLists.
-    class OpListDAG {
+    // This class encapsulates maintenance and manipulation of the drawing manager's DAG of
+    // renderTasks.
+    class RenderTaskDAG {
     public:
-        OpListDAG(bool sortOpLists);
-        ~OpListDAG();
+        RenderTaskDAG(bool sortRenderTasks);
+        ~RenderTaskDAG();
 
         // Currently, when explicitly allocating resources, this call will topologically sort the
         // opLists.
@@ -119,35 +116,35 @@
         // remove the opList but don't cleanup any refering pointers (i.e., dependency pointers
         // in the DAG). They work right now bc they are only called at flush time, after the
         // topological sort is complete (so the dangling pointers aren't used).
-        void removeOpList(int index);
-        void removeOpLists(int startIndex, int stopIndex);
+        void removeRenderTask(int index);
+        void removeRenderTasks(int startIndex, int stopIndex);
 
-        bool empty() const { return fOpLists.empty(); }
-        int numOpLists() const { return fOpLists.count(); }
+        bool empty() const { return fRenderTasks.empty(); }
+        int numRenderTasks() const { return fRenderTasks.count(); }
 
         bool isUsed(GrSurfaceProxy*) const;
 
-        GrOpList* opList(int index) { return fOpLists[index].get(); }
-        const GrOpList* opList(int index) const { return fOpLists[index].get(); }
+        GrRenderTask* renderTask(int index) { return fRenderTasks[index].get(); }
+        const GrRenderTask* renderTask(int index) const { return fRenderTasks[index].get(); }
 
-        GrOpList* back() { return fOpLists.back().get(); }
-        const GrOpList* back() const { return fOpLists.back().get(); }
+        GrRenderTask* back() { return fRenderTasks.back().get(); }
+        const GrRenderTask* back() const { return fRenderTasks.back().get(); }
 
-        void add(sk_sp<GrOpList>);
-        void add(const SkTArray<sk_sp<GrOpList>>&);
+        void add(sk_sp<GrRenderTask>);
+        void add(const SkTArray<sk_sp<GrRenderTask>>&);
 
-        void swap(SkTArray<sk_sp<GrOpList>>* opLists);
+        void swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks);
 
-        bool sortingOpLists() const { return fSortOpLists; }
+        bool sortingRenderTasks() const { return fSortRenderTasks; }
 
     private:
-        SkTArray<sk_sp<GrOpList>> fOpLists;
-        bool                      fSortOpLists;
+        SkTArray<sk_sp<GrRenderTask>> fRenderTasks;
+        bool                          fSortRenderTasks;
     };
 
     GrDrawingManager(GrRecordingContext*, const GrPathRendererChain::Options&,
                      const GrTextContext::Options&,
-                     bool sortOpLists,
+                     bool sortRenderTasks,
                      bool reduceOpListSplitting);
 
     bool wasAbandoned() const;
@@ -155,7 +152,8 @@
     void cleanup();
 
     // return true if any opLists were actually executed; false otherwise
-    bool executeOpLists(int startIndex, int stopIndex, GrOpFlushState*, int* numOpListsExecuted);
+    bool executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState*,
+                            int* numRenderTasksExecuted);
 
     GrSemaphoresSubmitted flush(GrSurfaceProxy* proxies[],
                                 int numProxies,
@@ -181,10 +179,10 @@
     // flushes.
     sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;
 
-    OpListDAG                         fDAG;
+    RenderTaskDAG                     fDAG;
     GrOpList*                         fActiveOpList = nullptr;
     // These are the IDs of the opLists currently being flushed (in internalFlush)
-    SkSTArray<8, uint32_t, true>      fFlushingOpListIDs;
+    SkSTArray<8, uint32_t, true>      fFlushingRenderTaskIDs;
     // These are the new opLists generated by the onFlush CBs
     SkSTArray<8, sk_sp<GrOpList>>     fOnFlushCBOpLists;
 
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
index ae16aa6..e80442e 100644
--- a/src/gpu/GrOpList.cpp
+++ b/src/gpu/GrOpList.cpp
@@ -5,48 +5,24 @@
  * found in the LICENSE file.
  */
 
-#include "src/gpu/GrOpList.h"
-
-#include "include/gpu/GrContext.h"
-#include "src/gpu/GrDeferredProxyUploader.h"
 #include "src/gpu/GrMemoryPool.h"
-#include "src/gpu/GrRenderTargetPriv.h"
-#include "src/gpu/GrStencilAttachment.h"
-#include "src/gpu/GrSurfaceProxy.h"
-#include "src/gpu/GrTextureProxyPriv.h"
-#include <atomic>
-
-uint32_t GrOpList::CreateUniqueID() {
-    static std::atomic<uint32_t> nextID{1};
-    uint32_t id;
-    do {
-        id = nextID++;
-    } while (id == SK_InvalidUniqueID);
-    return id;
-}
+#include "src/gpu/GrOpList.h"
 
 GrOpList::GrOpList(sk_sp<GrOpMemoryPool> opMemoryPool,
                    sk_sp<GrSurfaceProxy> surfaceProxy,
                    GrAuditTrail* auditTrail)
-        : fOpMemoryPool(std::move(opMemoryPool))
-        , fAuditTrail(auditTrail)
-        , fUniqueID(CreateUniqueID())
-        , fFlags(0) {
+        : GrRenderTask(std::move(surfaceProxy))
+        , fOpMemoryPool(std::move(opMemoryPool))
+        , fAuditTrail(auditTrail) {
     SkASSERT(fOpMemoryPool);
-    fTarget = std::move(surfaceProxy);
-    fTarget->setLastOpList(this);
 }
 
 GrOpList::~GrOpList() {
-    if (fTarget && this == fTarget->getLastOpList()) {
-        // Ensure the target proxy doesn't keep hold of a dangling back pointer.
-        fTarget->setLastOpList(nullptr);
-    }
 }
 
 void GrOpList::endFlush() {
-    if (fTarget && this == fTarget->getLastOpList()) {
-        fTarget->setLastOpList(nullptr);
+    if (fTarget && this == fTarget->getLastRenderTask()) {
+        fTarget->setLastRenderTask(nullptr);
     }
 
     fTarget.reset();
@@ -55,165 +31,15 @@
 }
 
 #ifdef SK_DEBUG
-bool GrOpList::deferredProxiesAreInstantiated() const {
-    for (int i = 0; i < fDeferredProxies.count(); ++i) {
-        if (!fDeferredProxies[i]->isInstantiated()) {
-            return false;
-        }
-    }
-
-    return true;
-}
-#endif
-
-void GrOpList::prepare(GrOpFlushState* flushState) {
-    for (int i = 0; i < fDeferredProxies.count(); ++i) {
-        fDeferredProxies[i]->texPriv().scheduleUpload(flushState);
-    }
-
-    this->onPrepare(flushState);
-}
-
-// Add a GrOpList-based dependency
-void GrOpList::addDependency(GrOpList* dependedOn) {
-    SkASSERT(!dependedOn->dependsOn(this));  // loops are bad
-
-    if (this->dependsOn(dependedOn)) {
-        return;  // don't add duplicate dependencies
-    }
-
-    fDependencies.push_back(dependedOn);
-    dependedOn->addDependent(this);
-
-    SkDEBUGCODE(this->validate());
-}
-
-// Convert from a GrSurface-based dependency to a GrOpList one
-void GrOpList::addDependency(GrSurfaceProxy* dependedOn, const GrCaps& caps) {
-    if (dependedOn->getLastOpList()) {
-        // If it is still receiving dependencies, this GrOpList shouldn't be closed
-        SkASSERT(!this->isClosed());
-
-        GrOpList* opList = dependedOn->getLastOpList();
-        if (opList == this) {
-            // self-read - presumably for dst reads. We can't make it closed in the self-read case.
-        } else {
-            this->addDependency(opList);
-
-            // We are closing 'opList' here bc the current contents of it are what 'this' opList
-            // depends on. We need a break in 'opList' so that the usage of that state has a
-            // chance to execute.
-            opList->makeClosed(caps);
-        }
-    }
-
-    if (GrTextureProxy* textureProxy = dependedOn->asTextureProxy()) {
-        if (textureProxy->texPriv().isDeferred()) {
-            fDeferredProxies.push_back(textureProxy);
-        }
-    }
-}
-
-bool GrOpList::dependsOn(const GrOpList* dependedOn) const {
-    for (int i = 0; i < fDependencies.count(); ++i) {
-        if (fDependencies[i] == dependedOn) {
-            return true;
-        }
-    }
-
-    return false;
-}
-
-
-void GrOpList::addDependent(GrOpList* dependent) {
-    fDependents.push_back(dependent);
-}
-
-#ifdef SK_DEBUG
-bool GrOpList::isDependedent(const GrOpList* dependent) const {
-    for (int i = 0; i < fDependents.count(); ++i) {
-        if (fDependents[i] == dependent) {
-            return true;
-        }
-    }
-
-    return false;
-}
-
-void GrOpList::validate() const {
-    // TODO: check for loops and duplicates
-
-    for (int i = 0; i < fDependencies.count(); ++i) {
-        SkASSERT(fDependencies[i]->isDependedent(this));
-    }
-}
-#endif
-
-void GrOpList::closeThoseWhoDependOnMe(const GrCaps& caps) {
-    for (int i = 0; i < fDependents.count(); ++i) {
-        if (!fDependents[i]->isClosed()) {
-            fDependents[i]->makeClosed(caps);
-        }
-    }
-}
-
-bool GrOpList::isInstantiated() const {
-    if (!fTarget->isInstantiated()) {
-        return false;
-    }
-
-    int minStencilSampleCount = (fTarget->asRenderTargetProxy())
-            ? fTarget->asRenderTargetProxy()->numStencilSamples()
-            : 0;
-
-    if (minStencilSampleCount) {
-        GrRenderTarget* rt = fTarget->peekRenderTarget();
-        SkASSERT(rt);
-
-        GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment();
-        if (!stencil) {
-            return false;
-        }
-        SkASSERT(stencil->numSamples() >= minStencilSampleCount);
-    }
-
-    GrSurface* surface = fTarget->peekSurface();
-    if (surface->wasDestroyed()) {
-        return false;
-    }
-
-    return true;
-}
-
-#ifdef SK_DEBUG
 static const char* op_to_name(GrLoadOp op) {
     return GrLoadOp::kLoad == op ? "load" : GrLoadOp::kClear == op ? "clear" : "discard";
 }
 
 void GrOpList::dump(bool printDependencies) const {
-    SkDebugf("--------------------------------------------------------------\n");
-    SkDebugf("opListID: %d - proxyID: %d - surfaceID: %d\n", fUniqueID,
-             fTarget ? fTarget->uniqueID().asUInt() : -1,
-             fTarget && fTarget->peekSurface()
-                     ? fTarget->peekSurface()->uniqueID().asUInt()
-                     : -1);
+    GrRenderTask::dump(printDependencies);
     SkDebugf("ColorLoadOp: %s %x StencilLoadOp: %s\n",
              op_to_name(fColorLoadOp),
              GrLoadOp::kClear == fColorLoadOp ? fLoadClearColor.toBytes_RGBA() : 0x0,
              op_to_name(fStencilLoadOp));
-
-    if (printDependencies) {
-        SkDebugf("I rely On (%d): ", fDependencies.count());
-        for (int i = 0; i < fDependencies.count(); ++i) {
-            SkDebugf("%d, ", fDependencies[i]->fUniqueID);
-        }
-        SkDebugf("\n");
-
-        SkDebugf("(%d) Rely On Me: ", fDependents.count());
-        for (int i = 0; i < fDependents.count(); ++i) {
-            SkDebugf("%d, ", fDependents[i]->fUniqueID);
-        }
-        SkDebugf("\n");
-    }
 }
 #endif
diff --git a/src/gpu/GrOpList.h b/src/gpu/GrOpList.h
index a16303b..101d481 100644
--- a/src/gpu/GrOpList.h
+++ b/src/gpu/GrOpList.h
@@ -11,32 +11,18 @@
 #include "include/core/SkRefCnt.h"
 #include "include/private/SkColorData.h"
 #include "include/private/SkTDArray.h"
+#include "src/gpu/GrRenderTask.h"
 #include "src/gpu/GrTextureProxy.h"
 
 class GrAuditTrail;
-class GrCaps;
-class GrOpFlushState;
 class GrOpMemoryPool;
-class GrRecordingContext;
-class GrRenderTargetOpList;
-class GrResourceAllocator;
-class GrResourceProvider;
-class GrSurfaceProxy;
-class GrTextureOpList;
 class GrGpuBuffer;
 
-struct SkIPoint;
-struct SkIRect;
-
-class GrOpList : public SkRefCnt {
+class GrOpList : public GrRenderTask {
 public:
     GrOpList(sk_sp<GrOpMemoryPool>, sk_sp<GrSurfaceProxy>, GrAuditTrail*);
     ~GrOpList() override;
 
-    // These two methods are only invoked at flush time
-    void prepare(GrOpFlushState* flushState);
-    bool execute(GrOpFlushState* flushState) { return this->onExecute(flushState); }
-
     virtual bool copySurface(GrRecordingContext*,
                              GrSurfaceProxy* dst,
                              GrSurfaceProxy* src,
@@ -50,153 +36,23 @@
                               sk_sp<GrGpuBuffer> dst,
                               size_t dstOffset) = 0;
 
-    virtual void makeClosed(const GrCaps&) {
-        if (!this->isClosed()) {
-            this->setFlag(kClosed_Flag);
-        }
-    }
-
-    // Called when this class will survive a flush and needs to truncate its ops and start over.
-    // TODO: ultimately it should be invalid for an op list to survive a flush.
-    // https://bugs.chromium.org/p/skia/issues/detail?id=7111
-    virtual void endFlush();
-
-    bool isClosed() const { return this->isSetFlag(kClosed_Flag); }
-
-    /*
-     * Notify this GrOpList that it relies on the contents of 'dependedOn'
-     */
-    void addDependency(GrSurfaceProxy* dependedOn, const GrCaps& caps);
-
-    /*
-     * Does this opList depend on 'dependedOn'?
-     */
-    bool dependsOn(const GrOpList* dependedOn) const;
-
-    /*
-     * Safely cast this GrOpList to a GrTextureOpList (if possible).
-     */
-    virtual GrTextureOpList* asTextureOpList() { return nullptr; }
-
-    /*
-     * Safely cast this GrOpList to a GrRenderTargetOpList (if possible).
-     */
-    virtual GrRenderTargetOpList* asRenderTargetOpList() { return nullptr; }
-
-    uint32_t uniqueID() const { return fUniqueID; }
+    void endFlush() override;
 
     /*
      * Dump out the GrOpList dependency DAG
      */
-    SkDEBUGCODE(virtual void dump(bool printDependencies) const;)
-
-    SkDEBUGCODE(virtual int numClips() const { return 0; })
+    SkDEBUGCODE(void dump(bool printDependencies) const override;)
 
 protected:
-    // In addition to just the GrSurface being allocated, has the stencil buffer been allocated (if
-    // it is required)?
-    bool isInstantiated() const;
-
-    SkDEBUGCODE(bool deferredProxiesAreInstantiated() const;)
-
     // This is a backpointer to the GrOpMemoryPool that holds the memory for this opLists' ops.
     // In the DDL case, these back pointers keep the DDL's GrOpMemoryPool alive as long as its
     // constituent opLists survive.
     sk_sp<GrOpMemoryPool> fOpMemoryPool;
-    sk_sp<GrSurfaceProxy> fTarget;
     GrAuditTrail*         fAuditTrail;
 
     GrLoadOp              fColorLoadOp    = GrLoadOp::kLoad;
     SkPMColor4f           fLoadClearColor = SK_PMColor4fTRANSPARENT;
     GrLoadOp              fStencilLoadOp  = GrLoadOp::kLoad;
-
-    // List of texture proxies whose contents are being prepared on a worker thread
-    // TODO: this list exists so we can fire off the proper upload when an opList begins
-    // executing. Can this be replaced?
-    SkTArray<GrTextureProxy*, true> fDeferredProxies;
-
-private:
-    friend class GrDrawingManager; // for resetFlag, TopoSortTraits & gatherProxyIntervals
-
-    virtual bool onIsUsed(GrSurfaceProxy*) const = 0;
-
-    bool isUsed(GrSurfaceProxy* proxy) const {
-        if (proxy == fTarget.get()) {
-            return true;
-        }
-
-        return this->onIsUsed(proxy);
-    }
-
-    void addDependency(GrOpList* dependedOn);
-    void addDependent(GrOpList* dependent);
-    SkDEBUGCODE(bool isDependedent(const GrOpList* dependent) const;)
-    SkDEBUGCODE(void validate() const;)
-    void closeThoseWhoDependOnMe(const GrCaps&);
-
-    // Remove all Ops which reference proxies that are not instantiated.
-    virtual void purgeOpsWithUninstantiatedProxies() = 0;
-
-    // Feed proxy usage intervals to the GrResourceAllocator class
-    virtual void gatherProxyIntervals(GrResourceAllocator*) const = 0;
-
-    static uint32_t CreateUniqueID();
-
-    enum Flags {
-        kClosed_Flag    = 0x01,   //!< This GrOpList can't accept any more ops
-
-        kWasOutput_Flag = 0x02,   //!< Flag for topological sorting
-        kTempMark_Flag  = 0x04,   //!< Flag for topological sorting
-    };
-
-    void setFlag(uint32_t flag) {
-        fFlags |= flag;
-    }
-
-    void resetFlag(uint32_t flag) {
-        fFlags &= ~flag;
-    }
-
-    bool isSetFlag(uint32_t flag) const {
-        return SkToBool(fFlags & flag);
-    }
-
-    struct TopoSortTraits {
-        static void Output(GrOpList* opList, int /* index */) {
-            opList->setFlag(GrOpList::kWasOutput_Flag);
-        }
-        static bool WasOutput(const GrOpList* opList) {
-            return opList->isSetFlag(GrOpList::kWasOutput_Flag);
-        }
-        static void SetTempMark(GrOpList* opList) {
-            opList->setFlag(GrOpList::kTempMark_Flag);
-        }
-        static void ResetTempMark(GrOpList* opList) {
-            opList->resetFlag(GrOpList::kTempMark_Flag);
-        }
-        static bool IsTempMarked(const GrOpList* opList) {
-            return opList->isSetFlag(GrOpList::kTempMark_Flag);
-        }
-        static int NumDependencies(const GrOpList* opList) {
-            return opList->fDependencies.count();
-        }
-        static GrOpList* Dependency(GrOpList* opList, int index) {
-            return opList->fDependencies[index];
-        }
-    };
-
-    virtual void onPrepare(GrOpFlushState* flushState) = 0;
-    virtual bool onExecute(GrOpFlushState* flushState) = 0;
-
-    const uint32_t         fUniqueID;
-    uint32_t               fFlags;
-
-    // 'this' GrOpList relies on the output of the GrOpLists in 'fDependencies'
-    SkSTArray<1, GrOpList*, true> fDependencies;
-    // 'this' GrOpList's output is relied on by the GrOpLists in 'fDependents'
-    SkSTArray<1, GrOpList*, true> fDependents;
-
-    typedef SkRefCnt INHERITED;
 };
 
 #endif
diff --git a/src/gpu/GrPipeline.cpp b/src/gpu/GrPipeline.cpp
index 98f1487..af2a5e6 100644
--- a/src/gpu/GrPipeline.cpp
+++ b/src/gpu/GrPipeline.cpp
@@ -74,20 +74,6 @@
 #endif
 }
 
-void GrPipeline::addDependenciesTo(GrOpList* opList, const GrCaps& caps) const {
-    for (int i = 0; i < fFragmentProcessors.count(); ++i) {
-        GrFragmentProcessor::TextureAccessIter iter(fFragmentProcessors[i].get());
-        while (const GrFragmentProcessor::TextureSampler* sampler = iter.next()) {
-            opList->addDependency(sampler->proxy(), caps);
-        }
-    }
-
-    if (fDstTextureProxy) {
-        opList->addDependency(fDstTextureProxy.get(), caps);
-    }
-
-}
-
 GrXferBarrierType GrPipeline::xferBarrierType(GrTexture* texture, const GrCaps& caps) const {
     if (fDstTextureProxy.get() && fDstTextureProxy.get()->peekTexture() == texture) {
         return kTexture_GrXferBarrierType;
diff --git a/src/gpu/GrPipeline.h b/src/gpu/GrPipeline.h
index 6eeff47..2978c69 100644
--- a/src/gpu/GrPipeline.h
+++ b/src/gpu/GrPipeline.h
@@ -117,9 +117,6 @@
     ///////////////////////////////////////////////////////////////////////////
     /// @name GrFragmentProcessors
 
-    // Make the renderTargetContext's GrOpList be dependent on any GrOpLists in this pipeline
-    void addDependenciesTo(GrOpList* recipient, const GrCaps&) const;
-
     int numColorFragmentProcessors() const { return fNumColorProcessors; }
     int numCoverageFragmentProcessors() const {
         return fFragmentProcessors.count() - fNumColorProcessors;
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index 87c2bda..b77d81b 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -162,7 +162,7 @@
     fRenderTargetProxy->validate(fContext);
 
     if (fOpList && !fOpList->isClosed()) {
-        SkASSERT(fRenderTargetProxy->getLastOpList() == fOpList.get());
+        SkASSERT(fRenderTargetProxy->getLastRenderTask() == fOpList.get());
     }
 }
 #endif
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index cd94e9c..7a220d8 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -610,7 +610,7 @@
     this->addOp(std::move(op), *context->priv().caps());
 }
 
-void GrRenderTargetOpList::purgeOpsWithUninstantiatedProxies() {
+void GrRenderTargetOpList::handleInternalAllocationFailure() {
     bool hasUninstantiatedProxy = false;
     auto checkInstantiation = [&hasUninstantiatedProxy](GrSurfaceProxy* p, GrMipMapped) {
         if (!p->isInstantiated()) {
diff --git a/src/gpu/GrRenderTargetOpList.h b/src/gpu/GrRenderTargetOpList.h
index 6106c76..369d9cd 100644
--- a/src/gpu/GrRenderTargetOpList.h
+++ b/src/gpu/GrRenderTargetOpList.h
@@ -227,7 +227,7 @@
         SkRect fBounds;
     };
 
-    void purgeOpsWithUninstantiatedProxies() override;
+    void handleInternalAllocationFailure() override;
 
     void gatherProxyIntervals(GrResourceAllocator*) const override;
 
diff --git a/src/gpu/GrRenderTask.cpp b/src/gpu/GrRenderTask.cpp
new file mode 100644
index 0000000..677d3b8
--- /dev/null
+++ b/src/gpu/GrRenderTask.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrRenderTask.h"
+
+#include "src/gpu/GrRenderTargetPriv.h"
+#include "src/gpu/GrStencilAttachment.h"
+#include "src/gpu/GrTextureProxyPriv.h"
+
+uint32_t GrRenderTask::CreateUniqueID() {
+    static std::atomic<uint32_t> nextID{1};
+    uint32_t id;
+    do {
+        id = nextID++;
+    } while (id == SK_InvalidUniqueID);
+    return id;
+}
+
+GrRenderTask::GrRenderTask(sk_sp<GrSurfaceProxy> target)
+        : fTarget(std::move(target))
+        , fUniqueID(CreateUniqueID())
+        , fFlags(0) {
+    fTarget->setLastRenderTask(this);
+}
+
+GrRenderTask::~GrRenderTask() {
+    if (fTarget && this == fTarget->getLastRenderTask()) {
+        // Ensure the target proxy doesn't keep hold of a dangling back pointer.
+        fTarget->setLastRenderTask(nullptr);
+    }
+}
+
+#ifdef SK_DEBUG
+bool GrRenderTask::deferredProxiesAreInstantiated() const {
+    for (int i = 0; i < fDeferredProxies.count(); ++i) {
+        if (!fDeferredProxies[i]->isInstantiated()) {
+            return false;
+        }
+    }
+
+    return true;
+}
+#endif
+
+void GrRenderTask::prepare(GrOpFlushState* flushState) {
+    for (int i = 0; i < fDeferredProxies.count(); ++i) {
+        fDeferredProxies[i]->texPriv().scheduleUpload(flushState);
+    }
+
+    this->onPrepare(flushState);
+}
+
+// Add a GrRenderTask-based dependency
+void GrRenderTask::addDependency(GrRenderTask* dependedOn) {
+    SkASSERT(!dependedOn->dependsOn(this));  // loops are bad
+
+    if (this->dependsOn(dependedOn)) {
+        return;  // don't add duplicate dependencies
+    }
+
+    fDependencies.push_back(dependedOn);
+    dependedOn->addDependent(this);
+
+    SkDEBUGCODE(this->validate());
+}
+
+// Convert from a GrSurface-based dependency to a GrRenderTask one
+void GrRenderTask::addDependency(GrSurfaceProxy* dependedOn, const GrCaps& caps) {
+    if (dependedOn->getLastRenderTask()) {
+        // If it is still receiving dependencies, this GrRenderTask shouldn't be closed
+        SkASSERT(!this->isClosed());
+
+        GrRenderTask* dependedOnTask = dependedOn->getLastRenderTask();
+        if (dependedOnTask == this) {
+            // self-read - presumably for dst reads. We can't make it closed in the self-read case.
+        } else {
+            this->addDependency(dependedOnTask);
+
+            // We are closing 'dependedOnTask' here bc the current contents of it are what 'this'
+            // renderTask depends on. We need a break in 'dependedOnTask' so that the usage of
+            // that state has a chance to execute.
+            dependedOnTask->makeClosed(caps);
+        }
+    }
+
+    if (GrTextureProxy* textureProxy = dependedOn->asTextureProxy()) {
+        if (textureProxy->texPriv().isDeferred()) {
+            fDeferredProxies.push_back(textureProxy);
+        }
+    }
+}
+
+bool GrRenderTask::dependsOn(const GrRenderTask* dependedOn) const {
+    for (int i = 0; i < fDependencies.count(); ++i) {
+        if (fDependencies[i] == dependedOn) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+
+void GrRenderTask::addDependent(GrRenderTask* dependent) {
+    fDependents.push_back(dependent);
+}
+
+#ifdef SK_DEBUG
+bool GrRenderTask::isDependedent(const GrRenderTask* dependent) const {
+    for (int i = 0; i < fDependents.count(); ++i) {
+        if (fDependents[i] == dependent) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+void GrRenderTask::validate() const {
+    // TODO: check for loops and duplicates
+
+    for (int i = 0; i < fDependencies.count(); ++i) {
+        SkASSERT(fDependencies[i]->isDependedent(this));
+    }
+}
+#endif
+
+void GrRenderTask::closeThoseWhoDependOnMe(const GrCaps& caps) {
+    for (int i = 0; i < fDependents.count(); ++i) {
+        if (!fDependents[i]->isClosed()) {
+            fDependents[i]->makeClosed(caps);
+        }
+    }
+}
+
+bool GrRenderTask::isInstantiated() const {
+    if (!fTarget->isInstantiated()) {
+        return false;
+    }
+
+    int minStencilSampleCount = (fTarget->asRenderTargetProxy())
+            ? fTarget->asRenderTargetProxy()->numStencilSamples()
+            : 0;
+
+    if (minStencilSampleCount) {
+        GrRenderTarget* rt = fTarget->peekRenderTarget();
+        SkASSERT(rt);
+
+        GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment();
+        if (!stencil) {
+            return false;
+        }
+        SkASSERT(stencil->numSamples() >= minStencilSampleCount);
+    }
+
+    GrSurface* surface = fTarget->peekSurface();
+    if (surface->wasDestroyed()) {
+        return false;
+    }
+
+    return true;
+}
+
+#ifdef SK_DEBUG
+void GrRenderTask::dump(bool printDependencies) const {
+    SkDebugf("--------------------------------------------------------------\n");
+    SkDebugf("renderTaskID: %d - proxyID: %d - surfaceID: %d\n", fUniqueID,
+             fTarget ? fTarget->uniqueID().asUInt() : -1,
+             fTarget && fTarget->peekSurface()
+                     ? fTarget->peekSurface()->uniqueID().asUInt()
+                     : -1);
+
+    if (printDependencies) {
+        SkDebugf("I rely On (%d): ", fDependencies.count());
+        for (int i = 0; i < fDependencies.count(); ++i) {
+            SkDebugf("%d, ", fDependencies[i]->fUniqueID);
+        }
+        SkDebugf("\n");
+
+        SkDebugf("(%d) Rely On Me: ", fDependents.count());
+        for (int i = 0; i < fDependents.count(); ++i) {
+            SkDebugf("%d, ", fDependents[i]->fUniqueID);
+        }
+        SkDebugf("\n");
+    }
+}
+#endif
diff --git a/src/gpu/GrRenderTask.h b/src/gpu/GrRenderTask.h
new file mode 100644
index 0000000..74a2222
--- /dev/null
+++ b/src/gpu/GrRenderTask.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTask_DEFINED
+#define GrRenderTask_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "include/private/SkColorData.h"
+#include "include/private/SkTDArray.h"
+#include "src/gpu/GrTextureProxy.h"
+
+class GrOpFlushState;
+class GrOpList;
+class GrRenderTargetOpList;
+class GrResourceAllocator;
+class GrTextureOpList;
+
+// This class abstracts a task that targets a single GrSurfaceProxy, participates in the
+// GrDrawingManager's DAG, and implements the onExecute method to modify its target proxy's
+// contents. (e.g., an opList that executes a command buffer, a task to regenerate mipmaps, etc.)
+class GrRenderTask : public SkRefCnt {
+public:
+    GrRenderTask(sk_sp<GrSurfaceProxy> target);
+    ~GrRenderTask() override;
+
+    // These two methods are only invoked at flush time
+    void prepare(GrOpFlushState* flushState);
+    bool execute(GrOpFlushState* flushState) { return this->onExecute(flushState); }
+
+    virtual void makeClosed(const GrCaps&) {
+        if (!this->isClosed()) {
+            this->setFlag(kClosed_Flag);
+        }
+    }
+
+    // Called when this class will survive a flush and needs to truncate its ops and start over.
+    // TODO: ultimately it should be invalid for a renderTask to survive a flush.
+    // https://bugs.chromium.org/p/skia/issues/detail?id=7111
+    virtual void endFlush() {}
+
+    bool isClosed() const { return this->isSetFlag(kClosed_Flag); }
+
+    /*
+     * Notify this GrRenderTask that it relies on the contents of 'dependedOn'
+     */
+    void addDependency(GrSurfaceProxy* dependedOn, const GrCaps& caps);
+
+    /*
+     * Does this renderTask depend on 'dependedOn'?
+     */
+    bool dependsOn(const GrRenderTask* dependedOn) const;
+
+    uint32_t uniqueID() const { return fUniqueID; }
+
+    /*
+     * Safely cast this GrRenderTask to a GrTextureOpList (if possible).
+     */
+    virtual GrTextureOpList* asTextureOpList() { return nullptr; }
+
+    /*
+     * Safely cast this GrRenderTask to a GrRenderTargetOpList (if possible).
+     */
+    virtual GrRenderTargetOpList* asRenderTargetOpList() { return nullptr; }
+
+    /*
+     * Dump out the GrRenderTask dependency DAG
+     */
+    SkDEBUGCODE(virtual void dump(bool printDependencies) const;)
+
+    SkDEBUGCODE(virtual int numClips() const { return 0; })
+
+protected:
+    // In addition to just the GrSurface being allocated, has the stencil buffer been allocated (if
+    // it is required)?
+    bool isInstantiated() const;
+
+    SkDEBUGCODE(bool deferredProxiesAreInstantiated() const;)
+
+    sk_sp<GrSurfaceProxy> fTarget;
+
+    // List of texture proxies whose contents are being prepared on a worker thread
+    // TODO: this list exists so we can fire off the proper upload when a renderTask begins
+    // executing. Can this be replaced?
+    SkTArray<GrTextureProxy*, true> fDeferredProxies;
+
+private:
+    // for resetFlag, TopoSortTraits, gatherProxyIntervals, handleInternalAllocationFailure
+    friend class GrDrawingManager;
+
+    // Drops any pending operations that reference proxies that are not instantiated.
+    // NOTE: Derived classes don't need to check fTarget. That is handled when the drawingManager
+    // calls isInstantiated.
+    virtual void handleInternalAllocationFailure() = 0;
+
+    virtual bool onIsUsed(GrSurfaceProxy*) const = 0;
+
+    bool isUsed(GrSurfaceProxy* proxy) const {
+        if (proxy == fTarget.get()) {
+            return true;
+        }
+
+        return this->onIsUsed(proxy);
+    }
+
+    void addDependency(GrRenderTask* dependedOn);
+    void addDependent(GrRenderTask* dependent);
+    SkDEBUGCODE(bool isDependedent(const GrRenderTask* dependent) const;)
+    SkDEBUGCODE(void validate() const;)
+    void closeThoseWhoDependOnMe(const GrCaps&);
+
+    // Feed proxy usage intervals to the GrResourceAllocator class
+    virtual void gatherProxyIntervals(GrResourceAllocator*) const = 0;
+
+    static uint32_t CreateUniqueID();
+
+    enum Flags {
+        kClosed_Flag    = 0x01,   //!< This GrRenderTask can't accept any more dependencies.
+
+        kWasOutput_Flag = 0x02,   //!< Flag for topological sorting
+        kTempMark_Flag  = 0x04,   //!< Flag for topological sorting
+    };
+
+    void setFlag(uint32_t flag) {
+        fFlags |= flag;
+    }
+
+    void resetFlag(uint32_t flag) {
+        fFlags &= ~flag;
+    }
+
+    bool isSetFlag(uint32_t flag) const {
+        return SkToBool(fFlags & flag);
+    }
+
+    struct TopoSortTraits {
+        static void Output(GrRenderTask* renderTask, int /* index */) {
+            renderTask->setFlag(kWasOutput_Flag);
+        }
+        static bool WasOutput(const GrRenderTask* renderTask) {
+            return renderTask->isSetFlag(kWasOutput_Flag);
+        }
+        static void SetTempMark(GrRenderTask* renderTask) {
+            renderTask->setFlag(kTempMark_Flag);
+        }
+        static void ResetTempMark(GrRenderTask* renderTask) {
+            renderTask->resetFlag(kTempMark_Flag);
+        }
+        static bool IsTempMarked(const GrRenderTask* renderTask) {
+            return renderTask->isSetFlag(kTempMark_Flag);
+        }
+        static int NumDependencies(const GrRenderTask* renderTask) {
+            return renderTask->fDependencies.count();
+        }
+        static GrRenderTask* Dependency(GrRenderTask* renderTask, int index) {
+            return renderTask->fDependencies[index];
+        }
+    };
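+    // (These traits are consumed by SkTTopoSort in the GrDrawingManager's RenderTaskDAG: the
+    // temp mark detects cycles during the depth-first walk, and kWasOutput_Flag marks nodes
+    // already emitted in sorted order.)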
+
+    virtual void onPrepare(GrOpFlushState* flushState) = 0;
+    virtual bool onExecute(GrOpFlushState* flushState) = 0;
+
+    const uint32_t         fUniqueID;
+    uint32_t               fFlags;
+
+    // 'this' GrRenderTask relies on the output of the GrRenderTasks in 'fDependencies'
+    SkSTArray<1, GrRenderTask*, true> fDependencies;
+    // 'this' GrRenderTask's output is relied on by the GrRenderTasks in 'fDependents'
+    SkSTArray<1, GrRenderTask*, true> fDependents;
+};
+
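+// A minimal subclass sketch (hypothetical, for illustration only), showing the hooks a
+// non-opList DAG participant must override:
+//
+//     class GrHypotheticalMipmapTask : public GrRenderTask {
+//     public:
+//         GrHypotheticalMipmapTask(sk_sp<GrSurfaceProxy> target)
+//                 : GrRenderTask(std::move(target)) {}
+//
+//     private:
+//         void handleInternalAllocationFailure() override {}  // no proxies besides fTarget
+//         bool onIsUsed(GrSurfaceProxy*) const override { return false; }  // only uses fTarget
+//         void gatherProxyIntervals(GrResourceAllocator* alloc) const override {
+//             // A real task would report fTarget's usage interval to 'alloc' here.
+//         }
+//         void onPrepare(GrOpFlushState*) override {}
+//         bool onExecute(GrOpFlushState*) override {
+//             return true;  // a real task would regenerate fTarget's mipmaps here
+//         }
+//     };
+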
+#endif
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index c45d6be..5e874c6 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -69,7 +69,7 @@
         , fLazyInstantiationType(lazyType)
         , fIsProtected(isProtected)
         , fGpuMemorySize(kInvalidGpuMemorySize)
-        , fLastOpList(nullptr) {
+        , fLastRenderTask(nullptr) {
     SkASSERT(fFormat.isValid());
     // NOTE: the default fUniqueID ctor pulls a value from the same pool as the GrGpuResources.
     if (fLazyInstantiateCallback) {
@@ -102,14 +102,14 @@
         , fUniqueID(fTarget->uniqueID())  // Note: converting from unique resource ID to a proxy ID!
         , fIsProtected(fTarget->isProtected() ? GrProtected::kYes : GrProtected::kNo)
         , fGpuMemorySize(kInvalidGpuMemorySize)
-        , fLastOpList(nullptr) {
+        , fLastRenderTask(nullptr) {
     SkASSERT(fFormat.isValid());
 }
 
 GrSurfaceProxy::~GrSurfaceProxy() {
     // For this to be deleted the opList that held a ref on it (if there was one) must have been
     // deleted. Which would have cleared out this back pointer.
-    SkASSERT(!fLastOpList);
+    SkASSERT(!fLastRenderTask);
 }
 
 bool GrSurfaceProxyPriv::AttachStencilIfNeeded(GrResourceProvider* resourceProvider,
@@ -289,23 +289,23 @@
                                      mipMapped, key);
 }
 
-void GrSurfaceProxy::setLastOpList(GrOpList* opList) {
+void GrSurfaceProxy::setLastRenderTask(GrRenderTask* renderTask) {
 #ifdef SK_DEBUG
-    if (fLastOpList) {
-        SkASSERT(fLastOpList->isClosed());
+    if (fLastRenderTask) {
+        SkASSERT(fLastRenderTask->isClosed());
     }
 #endif
 
     // Un-reffed
-    fLastOpList = opList;
+    fLastRenderTask = renderTask;
 }
 
 GrRenderTargetOpList* GrSurfaceProxy::getLastRenderTargetOpList() {
-    return fLastOpList ? fLastOpList->asRenderTargetOpList() : nullptr;
+    return fLastRenderTask ? fLastRenderTask->asRenderTargetOpList() : nullptr;
 }
 
 GrTextureOpList* GrSurfaceProxy::getLastTextureOpList() {
-    return fLastOpList ? fLastOpList->asTextureOpList() : nullptr;
+    return fLastRenderTask ? fLastRenderTask->asTextureOpList() : nullptr;
 }
 
 int GrSurfaceProxy::worstCaseWidth() const {
diff --git a/src/gpu/GrSurfaceProxy.h b/src/gpu/GrSurfaceProxy.h
index 0d241c7..7a15059 100644
--- a/src/gpu/GrSurfaceProxy.h
+++ b/src/gpu/GrSurfaceProxy.h
@@ -18,10 +18,10 @@
 
 class GrCaps;
 class GrContext_Base;
-class GrOpList;
 class GrRecordingContext;
 class GrRenderTargetOpList;
 class GrRenderTargetProxy;
+class GrRenderTask;
 class GrResourceProvider;
 class GrSurfaceContext;
 class GrSurfaceProxyPriv;
@@ -269,8 +269,8 @@
      */
     bool readOnly() const { return fSurfaceFlags & GrInternalSurfaceFlags::kReadOnly; }
 
-    void setLastOpList(GrOpList* opList);
-    GrOpList* getLastOpList() { return fLastOpList; }
+    void setLastRenderTask(GrRenderTask*);
+    GrRenderTask* getLastRenderTask() { return fLastRenderTask; }
 
     GrRenderTargetOpList* getLastRenderTargetOpList();
     GrTextureOpList* getLastTextureOpList();
@@ -437,8 +437,8 @@
     // This back-pointer is required so that we can add a dependancy between
     // the opList used to create the current contents of this surface
     // and the opList of a destination surface to which this one is being drawn or copied.
-    // This pointer is unreffed. OpLists own a ref on their surface proxies.
-    GrOpList*              fLastOpList;
+    // This pointer is unreffed. GrRenderTasks own a ref on their surface proxies.
+    GrRenderTask*          fLastRenderTask;
 
     typedef GrIORefProxy INHERITED;
 };
diff --git a/src/gpu/GrTextureContext.cpp b/src/gpu/GrTextureContext.cpp
index 1ba1243..a04d84a 100644
--- a/src/gpu/GrTextureContext.cpp
+++ b/src/gpu/GrTextureContext.cpp
@@ -33,7 +33,7 @@
     fTextureProxy->validate(fContext);
 
     if (fOpList && !fOpList->isClosed()) {
-        SkASSERT(fTextureProxy->getLastOpList() == fOpList.get());
+        SkASSERT(fTextureProxy->getLastRenderTask() == fOpList.get());
     }
 }
 #endif
diff --git a/src/gpu/GrTextureOpList.cpp b/src/gpu/GrTextureOpList.cpp
index 7e4d6c3..ed083da 100644
--- a/src/gpu/GrTextureOpList.cpp
+++ b/src/gpu/GrTextureOpList.cpp
@@ -183,7 +183,7 @@
     this->recordOp(std::move(op));
 }
 
-void GrTextureOpList::purgeOpsWithUninstantiatedProxies() {
+void GrTextureOpList::handleInternalAllocationFailure() {
     bool hasUninstantiatedProxy = false;
     auto checkInstantiation = [&hasUninstantiatedProxy](GrSurfaceProxy* p, GrMipMapped) {
         if (!p->isInstantiated()) {
diff --git a/src/gpu/GrTextureOpList.h b/src/gpu/GrTextureOpList.h
index fbfc6e1..e869289 100644
--- a/src/gpu/GrTextureOpList.h
+++ b/src/gpu/GrTextureOpList.h
@@ -71,7 +71,7 @@
     void deleteOp(int index);
     void deleteOps();
 
-    void purgeOpsWithUninstantiatedProxies() override;
+    void handleInternalAllocationFailure() override;
 
     void gatherProxyIntervals(GrResourceAllocator*) const override;
 
diff --git a/src/gpu/vk/GrVkSecondaryCBDrawContext.cpp b/src/gpu/vk/GrVkSecondaryCBDrawContext.cpp
index cea1478..9ffbdd4 100644
--- a/src/gpu/vk/GrVkSecondaryCBDrawContext.cpp
+++ b/src/gpu/vk/GrVkSecondaryCBDrawContext.cpp
@@ -170,7 +170,7 @@
     GrRenderTargetContext* rtc = fDevice->accessRenderTargetContext();
     GrContext* ctx = fDevice->context();
 
-    ctx->priv().copyOpListsFromDDL(ddl, rtc->asRenderTargetProxy());
+    ctx->priv().copyRenderTasksFromDDL(ddl, rtc->asRenderTargetProxy());
     return true;
 }