Detemplatize render task clustering
This simplifies our world and opens the door for more optimization.
Bug: skia:10877
Change-Id: I3c721f12a23bfa73dbdf1e02d9c77d7c6a889aa0
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/356309
Commit-Queue: Adlai Holler <adlai@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrRenderTaskCluster.cpp b/src/gpu/GrRenderTaskCluster.cpp
new file mode 100644
index 0000000..9cf0343
--- /dev/null
+++ b/src/gpu/GrRenderTaskCluster.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrRenderTaskCluster.h"
+
+#include "include/private/SkTHash.h"
+#include "src/gpu/GrRenderTask.h"
+
+// Uncomment to get lots of logging.
+#define CLUSTER_DEBUGF(...) //SkDebugf(__VA_ARGS__)
+
+// Convenience: the proxy of the task's first target. May be null — callers
+// (e.g. describe_task) check for that.
+static GrSurfaceProxy* first_target(GrRenderTask* task) {
+    return task->target(0).proxy();
+}
+
+#ifdef SK_DEBUG
+// Debug helper: formats a task as "<targetDebugName>(<taskID>)", or just
+// "<taskID>" when the task's first target has no proxy.
+[[maybe_unused]] static SkString describe_task(GrRenderTask* t) {
+    GrSurfaceProxy* target = first_target(t);
+    if (!target) {
+        return SkStringPrintf("%d", t->uniqueID());
+    }
+    return SkStringPrintf("%s(%d)", target->getDebugName().c_str(), t->uniqueID());
+}
+
+// Debug helper: space-separated descriptions of every task in the span.
+[[maybe_unused]] static SkString describe_tasks(SkSpan<const sk_sp<GrRenderTask>> collection) {
+    SkString result;
+    for (const sk_sp<GrRenderTask>& task : collection) {
+        SkString desc = describe_task(task.get());
+        result.appendf("%s ", desc.c_str());
+    }
+    return result;
+}
+
+// Debug helper: space-separated descriptions of every task in the linked list.
+[[maybe_unused]] static SkString describe_tasks(const SkTInternalLList<GrRenderTask>& collection) {
+    SkString result;
+    for (GrRenderTask* task : collection) {
+        SkString desc = describe_task(task);
+        result.appendf("%s ", desc.c_str());
+    }
+    return result;
+}
+
+// Debug-only sanity check on the reordered list: every task must appear after
+// all of its dependencies, and the list must contain exactly the input tasks.
+static void validate(SkSpan<const sk_sp<GrRenderTask>> input,
+                     const SkTInternalLList<GrRenderTask>& llist) {
+    // Walk the reordered list front-to-back; each task's dependencies must
+    // already have been emitted.
+    SkTHashSet<GrRenderTask*> emitted;
+    for (GrRenderTask* task : llist) {
+        emitted.add(task);
+        for (GrRenderTask* dep : task->dependencies()) {
+            SkASSERTF(emitted.contains(dep),
+                      "%s came before dependency %s",
+                      describe_task(task).c_str(),
+                      describe_task(dep).c_str());
+        }
+    }
+    // Removing every input entry from the emitted set should leave nothing:
+    // the list introduced no tasks that were not in the input.
+    for (const sk_sp<GrRenderTask>& orig : input) {
+        emitted.remove(orig.get());
+    }
+    SkASSERT(emitted.empty());
+}
+
+#endif // SK_DEBUG
+
+// Returns whether reordering occurred.
+// Visits one task (about to be appended to llist) and, when possible, extends
+// the existing cluster of consecutive tasks that render to the same target by
+// moving the intervening ("moved") tasks after that cluster. lastTaskMap maps
+// each target proxy to the most recent single-target task seen for it.
+// Returns whether any reordering occurred.
+static bool task_cluster_visit(GrRenderTask* task, SkTInternalLList<GrRenderTask>* llist,
+                               SkTHashMap<GrSurfaceProxy*, GrRenderTask*>* lastTaskMap) {
+    CLUSTER_DEBUGF("Cluster: ***Step***\nLooking at %s\n",
+                   describe_task(task).c_str());
+    if (task->numTargets() != 1) {
+        CLUSTER_DEBUGF("Cluster: %d targets. Emitting barriers.\n", task->numTargets());
+        // Tasks with 0 or multiple targets are treated as full barriers
+        // for all their targets.
+        for (int j = 0; j < task->numTargets(); j++) {
+            // BUGFIX: was target(0), which only cleared the first target's
+            // entry and left stale cluster tails for the remaining targets.
+            lastTaskMap->remove(task->target(j).proxy());
+        }
+        return false;
+    }
+
+    GrSurfaceProxy* target = first_target(task);
+    // Single hash lookup (was two back-to-back find() calls).
+    GrRenderTask** lastTaskEntry = lastTaskMap->find(target);
+    GrRenderTask* clusterTail = lastTaskEntry ? *lastTaskEntry : nullptr;
+    lastTaskMap->set(target, task);
+
+    if (!clusterTail) {
+        CLUSTER_DEBUGF("Cluster: Bail, no cluster to extend.\n");
+        return false;
+    }
+
+    CLUSTER_DEBUGF("Cluster: clusterTail is %s.\n", describe_task(clusterTail).c_str());
+
+    if (clusterTail == llist->tail()) {
+        CLUSTER_DEBUGF("Cluster: Bail, cluster is already tail.\n");
+        return false;
+    }
+    GrRenderTask* movedHead = clusterTail->fNext;
+
+    // Now, let's refer to the "cluster" as the chain of tasks with the same target, that we're
+    // hoping to extend by reordering. Let "moved tasks" be the ones we want to move to extend the
+    // cluster.
+    GrRenderTask* clusterHead = clusterTail;
+    while (clusterHead->fPrev
+           && 1 == clusterHead->fPrev->numTargets()
+           && target == first_target(clusterHead->fPrev)) {
+        clusterHead = clusterHead->fPrev;
+    }
+
+    // We can't reorder if any moved task depends on anything in the cluster.
+    // Time complexity here is high, but making a hash set is a lot worse.
+    for (GrRenderTask* moved = movedHead; moved; moved = moved->fNext) {
+        for (GrRenderTask* dep : moved->dependencies()) {
+            for (GrRenderTask* t = clusterHead; t != movedHead; t = t->fNext) {
+                if (t == dep) {
+                    CLUSTER_DEBUGF("Cluster: Bail, %s depends on %s.\n",
+                                   describe_task(moved).c_str(),
+                                   describe_task(dep).c_str());
+                    return false;
+                }
+            }
+        }
+    }
+
+    // Grab the moved tasks and pull them before clusterHead.
+    for (GrRenderTask* moved = movedHead; moved;) {
+        CLUSTER_DEBUGF("Cluster: Reorder %s behind %s.\n",
+                       describe_task(moved).c_str(),
+                       describe_task(clusterHead).c_str());
+        // Be careful to save fNext before each move.
+        GrRenderTask* nextMoved = moved->fNext;
+        llist->remove(moved);
+        llist->addBefore(moved, clusterHead);
+        moved = nextMoved;
+    }
+    return true;
+}
+
+// Reorders the input tasks into llist so that tasks sharing a render target
+// are clustered together where dependencies permit. Returns whether any
+// reordering happened (llist is left empty when it returns early).
+bool GrClusterRenderTasks(SkSpan<const sk_sp<GrRenderTask>> input,
+                          SkTInternalLList<GrRenderTask>* llist) {
+    SkASSERT(llist->isEmpty());
+
+    // With fewer than three tasks no beneficial reordering is possible.
+    if (input.count() < 3) {
+        return false;
+    }
+
+    CLUSTER_DEBUGF("Cluster: Original order is %s\n", describe_tasks(input).c_str());
+
+    SkTHashMap<GrSurfaceProxy*, GrRenderTask*> lastTaskMap;
+    bool reordered = false;
+    for (const sk_sp<GrRenderTask>& task : input) {
+        // Visit first (may shuffle earlier entries), then append this task.
+        reordered = task_cluster_visit(task.get(), llist, &lastTaskMap) || reordered;
+        llist->addToTail(task.get());
+        CLUSTER_DEBUGF("Cluster: Output order is now: %s\n", describe_tasks(*llist).c_str());
+    }
+
+#ifdef SK_DEBUG
+    if (reordered) {
+        validate(input, *llist);
+    }
+#endif
+
+    return reordered;
+}
+
+#undef CLUSTER_DEBUGF