Detemplatize render task clustering

This simplifies our world and opens the door for more optimization.

Bug: skia:10877
Change-Id: I3c721f12a23bfa73dbdf1e02d9c77d7c6a889aa0
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/356309
Commit-Queue: Adlai Holler <adlai@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index ccc1ea7..7437d32 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -27,13 +27,13 @@
 #include "src/gpu/GrRecordingContextPriv.h"
 #include "src/gpu/GrRenderTargetProxy.h"
 #include "src/gpu/GrRenderTask.h"
+#include "src/gpu/GrRenderTaskCluster.h"
 #include "src/gpu/GrResourceAllocator.h"
 #include "src/gpu/GrResourceProvider.h"
 #include "src/gpu/GrSoftwarePathRenderer.h"
 #include "src/gpu/GrSurfaceContext.h"
 #include "src/gpu/GrSurfaceDrawContext.h"
 #include "src/gpu/GrSurfaceProxyPriv.h"
-#include "src/gpu/GrTCluster.h"
 #include "src/gpu/GrTTopoSort.h"
 #include "src/gpu/GrTexture.h"
 #include "src/gpu/GrTextureProxy.h"
@@ -440,7 +440,7 @@
 void GrDrawingManager::reorderTasks() {
     SkASSERT(fReduceOpsTaskSplitting);
     SkTInternalLList<GrRenderTask> llist;
-    bool clustered = GrTCluster<GrRenderTask, GrRenderTask::ClusterTraits>(fDAG, &llist);
+    bool clustered = GrClusterRenderTasks(fDAG, &llist);
     if (!clustered) {
         return;
     }
diff --git a/src/gpu/GrRenderTask.h b/src/gpu/GrRenderTask.h
index 701e7b7..598deac 100644
--- a/src/gpu/GrRenderTask.h
+++ b/src/gpu/GrRenderTask.h
@@ -16,6 +16,7 @@
 #include "src/gpu/GrTextureResolveManager.h"
 #include "src/gpu/ops/GrOp.h"
 
+class GrMockRenderTask;
 class GrOpFlushState;
 class GrOpsTask;
 class GrResourceAllocator;
@@ -63,6 +64,8 @@
      */
     void addDependenciesFromOtherTask(GrRenderTask* otherTask);
 
+    SkSpan<GrRenderTask*> dependencies() { return SkSpan<GrRenderTask*>(fDependencies); }
+
     /*
      * Does this renderTask depend on 'dependedOn'?
      */
@@ -126,7 +129,7 @@
     // it is required)?
     bool isInstantiated() const;
 
-    // Used by GrTCluster.
+    // Used by GrRenderTaskCluster.
     SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrRenderTask);
 
 protected:
@@ -189,6 +192,7 @@
 private:
     // for TopoSortTraits, fTextureResolveTask, closeThoseWhoDependOnMe, addDependency
     friend class GrDrawingManager;
+    friend class GrMockRenderTask;
 
     // Derived classes can override to indicate usage of proxies _other than target proxies_.
     // GrRenderTask itself will handle checking the target proxies.
@@ -230,24 +234,6 @@
         }
     };
 
-    struct ClusterTraits {
-        static int NumTargets(GrRenderTask* renderTask) {
-            return renderTask->numTargets();
-        }
-        static uint32_t GetTarget(GrRenderTask* renderTask, int index) {
-            return renderTask->target(index).proxy()->uniqueID().asUInt();
-        }
-        static int NumDependencies(const GrRenderTask* renderTask) {
-            return renderTask->fDependencies.count();
-        }
-        static GrRenderTask* Dependency(GrRenderTask* renderTask, int index) {
-            return renderTask->fDependencies[index];
-        }
-        static uint32_t GetID(GrRenderTask* renderTask) {
-            return renderTask->uniqueID();
-        }
-    };
-
     virtual void onPrePrepare(GrRecordingContext*) {} // Only the GrOpsTask currently overrides this
     virtual void onPrepare(GrOpFlushState*) {} // Only GrOpsTask and GrDDLTask override this virtual
     virtual bool onExecute(GrOpFlushState* flushState) = 0;
diff --git a/src/gpu/GrRenderTaskCluster.cpp b/src/gpu/GrRenderTaskCluster.cpp
new file mode 100644
index 0000000..9cf0343
--- /dev/null
+++ b/src/gpu/GrRenderTaskCluster.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrRenderTaskCluster.h"
+
+#include "include/private/SkTHash.h"
+#include "src/gpu/GrRenderTask.h"
+
+// Uncomment to get lots of logging.
+#define CLUSTER_DEBUGF(...) //SkDebugf(__VA_ARGS__)
+
+static GrSurfaceProxy* first_target(GrRenderTask* task) {
+    return task->target(0).proxy();
+}
+
+#ifdef SK_DEBUG
+[[maybe_unused]] static SkString describe_task(GrRenderTask* t) {
+    // uniqueID() is unsigned; use %u to avoid a signed/unsigned format mismatch.
+    if (GrSurfaceProxy* target = first_target(t)) {
+        return SkStringPrintf("%s(%u)", target->getDebugName().c_str(), t->uniqueID());
+    } else {
+        return SkStringPrintf("%u", t->uniqueID());
+    }
+}
+
+[[maybe_unused]] static SkString describe_tasks(SkSpan<const sk_sp<GrRenderTask>> collection) {
+    SkString s;
+    for (const sk_sp<GrRenderTask>& t : collection) {
+        s.appendf("%s ", describe_task(t.get()).c_str());
+    }
+    return s;
+}
+
+[[maybe_unused]] static SkString describe_tasks(const SkTInternalLList<GrRenderTask>& collection) {
+    SkString s;
+    for (GrRenderTask* t : collection) {
+        s.appendf("%s ", describe_task(t).c_str());
+    }
+    return s;
+}
+
+static void validate(SkSpan<const sk_sp<GrRenderTask>> input,
+                     const SkTInternalLList<GrRenderTask>& llist) {
+    // Check that we didn't break dependencies.
+    SkTHashSet<GrRenderTask*> seen;
+    for (GrRenderTask* t : llist) {
+        seen.add(t);
+        for (GrRenderTask* dep : t->dependencies()) {
+            SkASSERTF(seen.contains(dep),
+                      "%s came before dependency %s",
+                      describe_task(t).c_str(),
+                      describe_task(dep).c_str());
+        }
+    }
+    // Check that llist has the same entries as the input.
+    for (const auto& orig : input) {
+        seen.remove(orig.get());
+    }
+    SkASSERT(seen.empty());
+}
+
+#endif  // SK_DEBUG
+
+// Returns whether reordering occurred.
+static bool task_cluster_visit(GrRenderTask* task, SkTInternalLList<GrRenderTask>* llist,
+                               SkTHashMap<GrSurfaceProxy*, GrRenderTask*>* lastTaskMap) {
+    CLUSTER_DEBUGF("Cluster: ***Step***\nLooking at %s\n",
+                   describe_task(task).c_str());
+    if (task->numTargets() != 1) {
+        CLUSTER_DEBUGF("Cluster: %d targets. Emitting barriers.\n", task->numTargets());
+        // Tasks with 0 or multiple targets are treated as full barriers
+        // for all their targets: drop each target's cluster tail so nothing reorders past us.
+        for (int j = 0; j < task->numTargets(); j++) {
+            lastTaskMap->remove(task->target(j).proxy());
+        }
+        return false;
+    }
+
+    GrSurfaceProxy* target = first_target(task);
+    GrRenderTask* clusterTail = (lastTaskMap->find(target) ? *lastTaskMap->find(target) : nullptr);
+    lastTaskMap->set(target, task);
+
+    if (!clusterTail) {
+        CLUSTER_DEBUGF("Cluster: Bail, no cluster to extend.\n");
+        return false;
+    }
+
+    CLUSTER_DEBUGF("Cluster: clusterTail is %s.\n", describe_task(clusterTail).c_str());
+
+    if (clusterTail == llist->tail()) {
+        CLUSTER_DEBUGF("Cluster: Bail, cluster is already tail.\n");
+        return false;
+    }
+    GrRenderTask* movedHead = clusterTail->fNext;
+
+    // Now, let's refer to the "cluster" as the chain of tasks with the same target, that we're
+    // hoping to extend by reordering. Let "moved tasks" be the ones we want to move to extend the
+    // cluster.
+    GrRenderTask* clusterHead = clusterTail;
+    while (clusterHead->fPrev
+           && 1 == clusterHead->fPrev->numTargets()
+           && target == first_target(clusterHead->fPrev)) {
+        clusterHead = clusterHead->fPrev;
+    }
+
+    // We can't reorder if any moved task depends on anything in the cluster.
+    // Time complexity here is high, but making a hash set is a lot worse.
+    for (GrRenderTask* moved = movedHead; moved; moved = moved->fNext) {
+        for (GrRenderTask* dep : moved->dependencies()) {
+            for (GrRenderTask* t = clusterHead; t != movedHead; t = t->fNext) {
+                if (t == dep) {
+                    CLUSTER_DEBUGF("Cluster: Bail, %s depends on %s.\n",
+                                   describe_task(moved).c_str(),
+                                   describe_task(dep).c_str());
+                    return false;
+                }
+            }
+        }
+    }
+
+    // Grab the moved tasks and pull them before clusterHead.
+    for (GrRenderTask* moved = movedHead; moved;) {
+        CLUSTER_DEBUGF("Cluster: Reorder %s behind %s.\n",
+                       describe_task(moved).c_str(),
+                       describe_task(clusterHead).c_str());
+        // Be careful to save fNext before each move.
+        GrRenderTask* nextMoved = moved->fNext;
+        llist->remove(moved);
+        llist->addBefore(moved, clusterHead);
+        moved = nextMoved;
+    }
+    return true;
+}
+
+bool GrClusterRenderTasks(SkSpan<const sk_sp<GrRenderTask>> input,
+                          SkTInternalLList<GrRenderTask>* llist) {
+    SkASSERT(llist->isEmpty());
+
+    if (input.count() < 3) {
+        return false;
+    }
+
+    CLUSTER_DEBUGF("Cluster: Original order is %s\n", describe_tasks(input).c_str());
+
+    SkTHashMap<GrSurfaceProxy*, GrRenderTask*> lastTaskMap;
+    bool didReorder = false;
+    for (const auto& t : input) {
+        didReorder |= task_cluster_visit(t.get(), llist, &lastTaskMap);
+        llist->addToTail(t.get());
+        CLUSTER_DEBUGF("Cluster: Output order is now: %s\n", describe_tasks(*llist).c_str());
+    }
+
+#ifdef SK_DEBUG
+    if (didReorder) {
+        validate(input, *llist);
+    }
+#endif
+
+    return didReorder;
+}
+
+#undef CLUSTER_DEBUGF
diff --git a/src/gpu/GrRenderTaskCluster.h b/src/gpu/GrRenderTaskCluster.h
new file mode 100644
index 0000000..fa125a1
--- /dev/null
+++ b/src/gpu/GrRenderTaskCluster.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrRenderTaskCluster_DEFINED
+#define GrRenderTaskCluster_DEFINED
+
+#include "include/core/SkRefCnt.h"
+#include "src/core/SkSpan.h"
+#include "src/core/SkTInternalLList.h"
+
+class GrRenderTask;
+
+// Take a topologically-sorted DAG and cluster the tasks together while maintaining the
+// dependencies.
+//
+// If no clustering is possible, returns false.
+// Otherwise, returns true and populates the provided llist as such:
+//   - Contains the same set of tasks as `input`.
+//   - Obeys the dependency rules in `input`.
+//   - Places tasks with the same target adjacent to each other.
+//   - Tasks with multiple targets act as reordering barriers for all their targets.
+bool GrClusterRenderTasks(SkSpan<const sk_sp<GrRenderTask>> input,
+                          SkTInternalLList<GrRenderTask>* llist);
+
+#endif
diff --git a/src/gpu/GrSurfaceProxy.h b/src/gpu/GrSurfaceProxy.h
index f162cc3..ae14446 100644
--- a/src/gpu/GrSurfaceProxy.h
+++ b/src/gpu/GrSurfaceProxy.h
@@ -321,7 +321,13 @@
     SkString dump() const;
 #endif
 
-    SkDEBUGCODE(void validate(GrContext_Base*) const;)
+#ifdef SK_DEBUG
+    void validate(GrContext_Base*) const;
+    SkString getDebugName() {
+        return fDebugName.isEmpty() ? SkStringPrintf("%u", this->uniqueID().asUInt()) : fDebugName;
+    }
+    void setDebugName(SkString name) { fDebugName = std::move(name); }
+#endif
 
     // Provides access to functions that aren't part of the public API.
     inline GrSurfaceProxyPriv priv();
@@ -434,6 +440,7 @@
     // If the proxy computes its own answer that answer is checked (in debug mode) in
     // the instantiation method.
     mutable size_t         fGpuMemorySize;
+    SkDEBUGCODE(SkString   fDebugName;)
 };
 
 GR_MAKE_BITFIELD_CLASS_OPS(GrSurfaceProxy::ResolveFlags)
diff --git a/src/gpu/GrTCluster.h b/src/gpu/GrTCluster.h
deleted file mode 100644
index f54295d..0000000
--- a/src/gpu/GrTCluster.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright 2020 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrTCluster_DEFINED
-#define GrTCluster_DEFINED
-
-#include "include/core/SkRefCnt.h"
-#include "include/private/SkTHash.h"
-#include "src/core/SkScopeExit.h"
-#include "src/core/SkSpan.h"
-#include "src/core/SkTInternalLList.h"
-
-// Uncomment to get lots of logging.
-#define CLUSTER_DEBUGF(...) //SkDebugf(__VA_ARGS__)
-
-#ifdef SK_DEBUG
-template <typename T, typename Traits>
-SkString GrTCluster_DebugStr(T* t) {
-    if (Traits::NumTargets(t) != 1) {
-        return SkStringPrintf("%d", Traits::GetID(t));
-    } else {
-        return SkStringPrintf("%d(%d)", Traits::GetID(t), Traits::GetTarget(t, 0));
-    }
-}
-
-template <typename T, typename Traits>
-SkString GrTCluster_DebugStr(SkSpan<const sk_sp<T>> collection) {
-    SkString s;
-    for (const sk_sp<T>& t_sp : collection) {
-        s.appendf("%s ", GrTCluster_DebugStr<T, Traits>(t_sp.get()).c_str());
-    }
-    return s;
-}
-
-template <typename T, typename Traits>
-SkString GrTCluster_DebugStr(const SkTInternalLList<T>& collection) {
-    SkString s;
-    for (T* t : collection) {
-        s.appendf("%s ", GrTCluster_DebugStr<T, Traits>(t).c_str());
-    }
-    return s;
-}
-
-template<typename T, typename Traits>
-void GrTCluster_Validate(SkSpan<const sk_sp<T>> input, const SkTInternalLList<T>& llist) {
-    // Check that we didn't break dependencies.
-    SkTHashSet<T*> seen;
-    for (T* t : llist) {
-        seen.add(t);
-        for (int i = 0; i < Traits::NumDependencies(t); i++) {
-            T* dep = Traits::Dependency(t, i);
-            SkASSERTF(seen.contains(dep),
-                      "%s came before dependency %s",
-                      GrTCluster_DebugStr<T, Traits>(t).c_str(),
-                      GrTCluster_DebugStr<T, Traits>(dep).c_str());
-        }
-    }
-    // Check that llist has the same entries as the input.
-    for (const auto& orig : input) {
-        seen.remove(orig.get());
-    }
-    SkASSERT(seen.empty());
-}
-
-#endif  // SK_DEBUG
-
-// Returns whether reordering occurred.
-template <typename T, typename Traits>
-bool GrTCluster_Visit(T* task, SkTInternalLList<T>* llist,
-                      SkTHashMap<uint32_t, T*>* lastTaskMap) {
-    // TODO: This wrapper incurs surprising overhead. Move this code out of Visit into loop?
-    SkScopeExit exit([&]{
-        llist->addToTail(task);
-        CLUSTER_DEBUGF("Cluster: Output order is now: %s\n",
-                       GrTCluster_DebugStr<T, Traits>(*llist).c_str());
-    });
-
-    CLUSTER_DEBUGF("Cluster: ***Step***\nLooking at %s\n",
-                   GrTCluster_DebugStr<T, Traits>(task).c_str());
-    if (Traits::NumTargets(task) != 1) {
-        CLUSTER_DEBUGF("Cluster: %d targets. Emitting barriers.\n", Traits::NumTargets(task));
-        // Tasks with 0 or multiple targets are treated as full barriers
-        // for all their targets.
-        for (int j = 0; j < Traits::NumTargets(task); j++) {
-            lastTaskMap->remove(Traits::GetTarget(task, j));
-        }
-        return false;
-    }
-
-    uint32_t target = Traits::GetTarget(task, 0);
-    T* clusterTail = (lastTaskMap->find(target) ? *lastTaskMap->find(target) : nullptr);
-    // TODO: This is bottlenecking the whole algorithm. De-templatify this, and add temporary
-    // storage to the task instead.
-    lastTaskMap->set(target, task);
-
-    if (!clusterTail) {
-        CLUSTER_DEBUGF("Cluster: Bail, no cluster to extend.\n", SkToInt(target));
-        return false;
-    }
-
-    CLUSTER_DEBUGF("Cluster: clusterTail is %s.\n",
-                   GrTCluster_DebugStr<T, Traits>(clusterTail).c_str());
-
-    if (clusterTail == llist->tail()) {
-        CLUSTER_DEBUGF("Cluster: Bail, cluster is already tail.\n");
-        return false;
-    }
-    T* movedHead = clusterTail->fNext;
-
-    // Now, let's refer to the "cluster" as the chain of tasks with the same target, that we're
-    // hoping to extend by reordering. Let "moved tasks" be the ones we want to move to extend the
-    // cluster.
-    T* clusterHead = clusterTail;
-    while (clusterHead->fPrev
-           && 1 == Traits::NumTargets(clusterHead->fPrev)
-           && target == Traits::GetTarget(clusterHead->fPrev, 0)) {
-        clusterHead = clusterHead->fPrev;
-    }
-
-    // We can't reorder if any moved task depends on anything in the cluster.
-    // Then check the constraint.
-    // Time complexity here is high, but making a hash set is a lot worse.
-    for (T* moved = movedHead; moved; moved = moved->fNext) {
-        for (int j = 0; j < Traits::NumDependencies(moved); j++) {
-            T* dep = Traits::Dependency(moved, j);
-            for (T* c = clusterHead; c != movedHead; c = c->fNext) {
-                if (dep == c) {
-                    CLUSTER_DEBUGF("Cluster: Bail, %s depends on %s.\n",
-                                   GrTCluster_DebugStr<T, Traits>(moved).c_str(),
-                                   GrTCluster_DebugStr<T, Traits>(dep).c_str());
-                    return false;
-                }
-            }
-        }
-    }
-
-    // Grab the moved tasks and pull them before clusterHead.
-    for (T* moved = movedHead; moved;) {
-        CLUSTER_DEBUGF("Cluster: Reorder %s behind %s.\n",
-                       GrTCluster_DebugStr<T, Traits>(moved).c_str(),
-                       GrTCluster_DebugStr<T, Traits>(clusterHead).c_str());
-        // Be careful to save fNext before each move.
-        T* nextMoved = moved->fNext;
-        llist->remove(moved);
-        llist->addBefore(moved, clusterHead);
-        moved = nextMoved;
-    }
-    return true;
-}
-
-// Take a topologically-sorted DAG and cluster the entries together while maintaining the
-// dependency order.
-//
-// If no clustering is possible, returns false.
-// Otherwise, returns true and populates the provided llist as such:
-//   - Contains the same set of entries as `input`.
-//   - Obeys the dependency rules in `input`.
-//   - Places entries with the same Target (see Traits) adjacent to each other.
-//   - Entries with multiple targets act as reordering barriers for all their targets.
-//
-// T must declare a public SkTInternalLList interface.
-//
-// Traits requires:
-//
-//   static int NumTargets(const T* t) { ... }      //
-//   static uint32_t GetTarget(const T* t, int i)   //
-//   static int NumDependencies(const T* t) { ... } //
-//   static T* Dependency(T* t, int index) { ... }  //
-//   static uint32_t GetID(T* t) { ... }            //
-template <typename T, typename Traits = T>
-bool GrTCluster(SkSpan<const sk_sp<T>> input, SkTInternalLList<T>* llist) {
-    SkASSERT(llist->isEmpty());
-
-    if (input.count() < 3) {
-        return false;
-    }
-
-    CLUSTER_DEBUGF("Cluster: Original order is %s\n",
-                   GrTCluster_DebugStr<T, Traits>(input).c_str());
-
-    SkTHashMap<uint32_t, T*> lastTaskMap;
-    bool didReorder = false;
-    for (const auto& t : input) {
-        didReorder |= GrTCluster_Visit<T, Traits>(t.get(), llist, &lastTaskMap);
-    }
-
-#ifdef SK_DEBUG
-    if (didReorder) {
-        GrTCluster_Validate<T, Traits>(input, *llist);
-    }
-#endif
-
-    return didReorder;
-}
-
-#undef CLUSTER_DEBUGF
-
-#endif
diff --git a/src/gpu/mock/GrMockRenderTask.h b/src/gpu/mock/GrMockRenderTask.h
new file mode 100644
index 0000000..697cc52
--- /dev/null
+++ b/src/gpu/mock/GrMockRenderTask.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockRenderTask_DEFINED
+#define GrMockRenderTask_DEFINED
+
+#include "src/gpu/GrRenderTask.h"
+
+class GrMockRenderTask : public GrRenderTask {
+public:
+    GrMockRenderTask() : GrRenderTask() {
+        // Mock tasks are never "owned" by a drawmgr in the first place.
+        this->setFlag(kDisowned_Flag);
+    }
+
+    void addTarget(GrSurfaceProxyView view) { fTargets.push_back(std::move(view)); }
+    void addDependency(GrRenderTask* dep) { fDependencies.push_back(dep); }
+
+    // Overrides.
+#ifdef SK_DEBUG
+    void visitProxies_debugOnly(const GrOp::VisitProxyFunc&) const override { return; }
+#endif
+    void handleInternalAllocationFailure() override {}
+    void gatherProxyIntervals(GrResourceAllocator*) const override {}
+    ExpectedOutcome onMakeClosed(const GrCaps&, SkIRect*) override { SkUNREACHABLE; }
+    bool onIsUsed(GrSurfaceProxy*) const override { return false; }
+    bool onExecute(GrOpFlushState*) override { return true; }
+
+#if GR_TEST_UTILS
+    const char* name() const final { return "Mock"; }
+#endif
+};
+
+#endif
diff --git a/src/gpu/mock/GrMockSurfaceProxy.h b/src/gpu/mock/GrMockSurfaceProxy.h
new file mode 100644
index 0000000..af541f2
--- /dev/null
+++ b/src/gpu/mock/GrMockSurfaceProxy.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrMockSurfaceProxy_DEFINED
+#define GrMockSurfaceProxy_DEFINED
+
+#include "src/gpu/GrSurfaceProxy.h"
+
+class GrMockSurfaceProxy : public GrSurfaceProxy {
+public:
+    GrMockSurfaceProxy(SkString name) : GrSurfaceProxy(
+            GrBackendFormat::MakeMock(GrColorType::kRGBA_8888, SkImage::CompressionType::kNone),
+            SkISize::Make(1, 1),
+            SkBackingFit::kExact,
+            SkBudgeted::kNo,
+            GrProtected::kNo,
+            GrInternalSurfaceFlags::kNone,
+            UseAllocator::kNo) {
+        SkDEBUGCODE(this->setDebugName(std::move(name)));
+    }
+
+    bool instantiate(GrResourceProvider*) override { return false; }
+    SkDEBUGCODE(void onValidateSurface(const GrSurface*) override {} )
+    size_t onUninstantiatedGpuMemorySize() const override { return 0; }
+
+protected:
+    sk_sp<GrSurface> createSurface(GrResourceProvider*) const override { return nullptr; }
+
+private:
+    LazySurfaceDesc callbackDesc() const override { SkUNREACHABLE; }
+};
+
+#endif