Use SkSpan to clean up GrOnFlushCallbackObject API

Also replace the OpsTask terminology with the broader RenderTask term.

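The callback signatures change from a raw pointer plus a count to a single
SkSpan, so callers can pass the task-ID array directly and implementations
can iterate it with a range-based for loop. Roughly (see the
GrOnFlushResourceProvider.h hunk below for the actual edit):

    // Before:
    virtual void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs,
                          int numOpsTaskIDs) = 0;

    // After:
    virtual void preFlush(GrOnFlushResourceProvider*,
                          SkSpan<const uint32_t> renderTaskIDs) = 0;
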
Change-Id: I8549e74a3e2f6b2caf765103f31243b776823c16
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/332724
Commit-Queue: Adlai Holler <adlai@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index bc980e7..79b6e45 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -233,8 +233,7 @@
         fDAG.gatherIDs(&fFlushingRenderTaskIDs);
 
         for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
-            onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
-                                      fFlushingRenderTaskIDs.count());
+            onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs);
         }
         for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
             onFlushRenderTask->makeClosed(*fContext->priv().caps());
@@ -339,8 +338,7 @@
         flushed = false;
     }
     for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
-        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingRenderTaskIDs.begin(),
-                                   fFlushingRenderTaskIDs.count());
+        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingRenderTaskIDs);
         flushed = true;
     }
     if (flushed) {
diff --git a/src/gpu/GrOnFlushResourceProvider.h b/src/gpu/GrOnFlushResourceProvider.h
index 3a6df37..9308d36 100644
--- a/src/gpu/GrOnFlushResourceProvider.h
+++ b/src/gpu/GrOnFlushResourceProvider.h
@@ -10,6 +10,7 @@
 
 #include "include/core/SkRefCnt.h"
 #include "include/private/SkTArray.h"
+#include "src/core/SkSpan.h"
 #include "src/gpu/GrDeferredUpload.h"
 #include "src/gpu/GrOpFlushState.h"
 #include "src/gpu/GrResourceProvider.h"
@@ -31,19 +32,18 @@
 
     /*
      * The preFlush callback allows subsystems (e.g., text, path renderers) to create atlases
-     * for a specific flush. All the GrOpsTask IDs required for the flush are passed into the
+     * for a specific flush. All the GrRenderTask IDs required for the flush are passed into the
      * callback.
      */
-    virtual void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs,
-                          int numOpsTaskIDs) = 0;
+    virtual void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> renderTaskIDs) = 0;
 
     /**
-     * Called once flushing is complete and all opsTasks indicated by preFlush have been executed
+     * Called once flushing is complete and all renderTasks indicated by preFlush have been executed
      * and released. startTokenForNextFlush can be used to track resources used in the current
      * flush.
      */
     virtual void postFlush(GrDeferredUploadToken startTokenForNextFlush,
-                           const uint32_t* opsTaskIDs, int numOpsTaskIDs) {}
+                           SkSpan<const uint32_t> renderTaskIDs) {}
 
     /**
      * Tells the callback owner to hold onto this object when freeing GPU resources.
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 473bbe1..64b5ad8 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -223,7 +223,7 @@
 }
 
 void GrCoverageCountingPathRenderer::preFlush(
-        GrOnFlushResourceProvider* onFlushRP, const uint32_t* opsTaskIDs, int numOpsTaskIDs) {
+        GrOnFlushResourceProvider* onFlushRP, SkSpan<const uint32_t> taskIDs) {
     using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
     SkASSERT(!fFlushing);
     SkASSERT(fFlushingPaths.empty());
@@ -246,9 +246,9 @@
 
     // Move the per-opsTask paths that are about to be flushed from fPendingPaths to fFlushingPaths,
     // and count them up so we can preallocate buffers.
-    fFlushingPaths.reserve_back(numOpsTaskIDs);
-    for (int i = 0; i < numOpsTaskIDs; ++i) {
-        auto iter = fPendingPaths.find(opsTaskIDs[i]);
+    fFlushingPaths.reserve_back(taskIDs.count());
+    for (uint32_t taskID : taskIDs) {
+        auto iter = fPendingPaths.find(taskID);
         if (fPendingPaths.end() == iter) {
             continue;  // No paths on this opsTask.
         }
@@ -311,8 +311,8 @@
     }
 }
 
-void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs,
-                                               int numOpsTaskIDs) {
+void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken,
+                                               SkSpan<const uint32_t> /* taskIDs */) {
     SkASSERT(fFlushing);
 
     if (!fFlushingPaths.empty()) {
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 5e4d2ba..13f6c7d 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -67,9 +67,8 @@
             const SkPath& deviceSpacePath, const SkIRect& accessRect, const GrCaps& caps);
 
     // GrOnFlushCallbackObject overrides.
-    void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs,
-                  int numOpsTaskIDs) override;
-    void postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs, int numOpsTaskIDs) override;
+    void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> taskIDs) override;
+    void postFlush(GrDeferredUploadToken, SkSpan<const uint32_t> taskIDs) override;
 
     void purgeCacheEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point&);
 
diff --git a/src/gpu/ops/GrSmallPathAtlasMgr.h b/src/gpu/ops/GrSmallPathAtlasMgr.h
index 86fec24..3b8f403 100644
--- a/src/gpu/ops/GrSmallPathAtlasMgr.h
+++ b/src/gpu/ops/GrSmallPathAtlasMgr.h
@@ -48,16 +48,14 @@
 
     // GrOnFlushCallbackObject overrides
     void preFlush(GrOnFlushResourceProvider* onFlushRP,
-                  const uint32_t* /* opsTaskIDs */,
-                  int /* numOpsTaskIDs */) override {
+                  SkSpan<const uint32_t> /* taskIDs */) override {
         if (fAtlas) {
             fAtlas->instantiate(onFlushRP);
         }
     }
 
     void postFlush(GrDeferredUploadToken startTokenForNextFlush,
-                   const uint32_t* /* opsTaskIDs */,
-                   int /* numOpsTaskIDs */) override {
+                   SkSpan<const uint32_t> /* taskIDs */) override {
         if (fAtlas) {
             fAtlas->compact(startTokenForNextFlush);
         }
diff --git a/src/gpu/tessellate/GrTessellationPathRenderer.cpp b/src/gpu/tessellate/GrTessellationPathRenderer.cpp
index b95d277..a1d75f8 100644
--- a/src/gpu/tessellate/GrTessellationPathRenderer.cpp
+++ b/src/gpu/tessellate/GrTessellationPathRenderer.cpp
@@ -361,7 +361,7 @@
 }
 
 void GrTessellationPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
-                                          const uint32_t* opsTaskIDs, int numOpsTaskIDs) {
+                                          SkSpan<const uint32_t> /* taskIDs */) {
     if (!fAtlas.drawBounds().isEmpty()) {
         this->renderAtlas(onFlushRP);
         fAtlas.reset(kAtlasInitialSize, *onFlushRP->caps());
diff --git a/src/gpu/tessellate/GrTessellationPathRenderer.h b/src/gpu/tessellate/GrTessellationPathRenderer.h
index 1eff182..88840fc 100644
--- a/src/gpu/tessellate/GrTessellationPathRenderer.h
+++ b/src/gpu/tessellate/GrTessellationPathRenderer.h
@@ -54,8 +54,7 @@
     CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
     bool onDrawPath(const DrawPathArgs&) override;
     void onStencilPath(const StencilPathArgs&) override;
-    void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs,
-                  int numOpsTaskIDs) override;
+    void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> taskIDs) override;
 
 private:
     void initAtlasFlags(GrRecordingContext*);
diff --git a/src/gpu/text/GrAtlasManager.h b/src/gpu/text/GrAtlasManager.h
index 9005070..109d128 100644
--- a/src/gpu/text/GrAtlasManager.h
+++ b/src/gpu/text/GrAtlasManager.h
@@ -83,7 +83,7 @@
 
     // GrOnFlushCallbackObject overrides
 
-    void preFlush(GrOnFlushResourceProvider* onFlushRP, const uint32_t*, int) override {
+    void preFlush(GrOnFlushResourceProvider* onFlushRP, SkSpan<const uint32_t>) override {
         for (int i = 0; i < kMaskFormatCount; ++i) {
             if (fAtlases[i]) {
                 fAtlases[i]->instantiate(onFlushRP);
@@ -91,8 +91,7 @@
         }
     }
 
-    void postFlush(GrDeferredUploadToken startTokenForNextFlush,
-                   const uint32_t* opsTaskIDs, int numOpsTaskIDs) override {
+    void postFlush(GrDeferredUploadToken startTokenForNextFlush, SkSpan<const uint32_t>) override {
         for (int i = 0; i < kMaskFormatCount; ++i) {
             if (fAtlases[i]) {
                 fAtlases[i]->compact(startTokenForNextFlush);