Add GrPrepareCallback, always run at the start of flush

This is like an op, but only has one virtual, and always runs before
any ops prepare. To be used in threaded software mask rendering
(to schedule ASAP uploads).

Bug: skia:
Change-Id: I647482e2472d7321f3685e5bdbe49e10ac59c0b1
Reviewed-on: https://skia-review.googlesource.com/37160
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Brian Osman <brianosman@google.com>
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 9e7372c..22594e7 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -152,8 +152,8 @@
                     continue;   // Odd - but not a big deal
                 }
                 opList->makeClosed(*fContext->caps());
-                opList->prepareOps(&fFlushState);
-                if (!opList->executeOps(&fFlushState)) {
+                opList->prepare(&fFlushState);
+                if (!opList->execute(&fFlushState)) {
                     continue;         // This is bad
                 }
             }
@@ -174,7 +174,7 @@
             continue;
         }
 
-        fOpLists[i]->prepareOps(&fFlushState);
+        fOpLists[i]->prepare(&fFlushState);
     }
 
     // Upload all data to the GPU
@@ -185,7 +185,7 @@
             continue;
         }
 
-        if (fOpLists[i]->executeOps(&fFlushState)) {
+        if (fOpLists[i]->execute(&fFlushState)) {
             flushed = true;
         }
         fOpLists[i]->reset();
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
index 5a9a1bd..c87d520 100644
--- a/src/gpu/GrOpList.cpp
+++ b/src/gpu/GrOpList.cpp
@@ -54,9 +54,18 @@
     }
 
     fTarget.reset();
+    fPrepareCallbacks.reset();
     fAuditTrail = nullptr;
 }
 
+void GrOpList::prepare(GrOpFlushState* flushState) {
+    for (int i = 0; i < fPrepareCallbacks.count(); ++i) {
+        (*fPrepareCallbacks[i])(flushState);
+    }
+
+    this->onPrepare(flushState);
+}
+
 // Add a GrOpList-based dependency
 void GrOpList::addDependency(GrOpList* dependedOn) {
     SkASSERT(!dependedOn->dependsOn(this));  // loops are bad
diff --git a/src/gpu/GrOpList.h b/src/gpu/GrOpList.h
index 00dc344..a9fa7bd 100644
--- a/src/gpu/GrOpList.h
+++ b/src/gpu/GrOpList.h
@@ -26,6 +26,12 @@
 struct SkIPoint;
 struct SkIRect;
 
+class GrPrepareCallback : SkNoncopyable {
+public:
+    virtual ~GrPrepareCallback() {}
+    virtual void operator()(GrOpFlushState*) = 0;
+};
+
 class GrOpList : public SkRefCnt {
 public:
     GrOpList(GrResourceProvider*, GrSurfaceProxy*, GrAuditTrail*);
@@ -33,8 +39,8 @@
 
     // These three methods are invoked at flush time
     bool instantiate(GrResourceProvider* resourceProvider);
-    virtual void prepareOps(GrOpFlushState* flushState) = 0;
-    virtual bool executeOps(GrOpFlushState* flushState) = 0;
+    void prepare(GrOpFlushState* flushState);
+    bool execute(GrOpFlushState* flushState) { return this->onExecute(flushState); }
 
     virtual bool copySurface(const GrCaps& caps,
                              GrSurfaceProxy* dst,
@@ -51,6 +57,10 @@
 
     virtual void reset();
 
+    void addPrepareCallback(std::unique_ptr<GrPrepareCallback> callback) {
+        fPrepareCallbacks.push_back(std::move(callback));
+    }
+
     // TODO: in an MDB world, where the OpLists don't allocate GPU resources, it seems like
     // these could go away
     virtual void abandonGpuResources() = 0;
@@ -147,6 +157,9 @@
         }
     };
 
+    virtual void onPrepare(GrOpFlushState* flushState) = 0;
+    virtual bool onExecute(GrOpFlushState* flushState) = 0;
+
     void addDependency(GrOpList* dependedOn);
 
     uint32_t              fUniqueID;
@@ -155,6 +168,9 @@
     // 'this' GrOpList relies on the output of the GrOpLists in 'fDependencies'
     SkTDArray<GrOpList*>  fDependencies;
 
+    // These are used rarely, most clients never produce any
+    SkTArray<std::unique_ptr<GrPrepareCallback>> fPrepareCallbacks;
+
     typedef SkRefCnt INHERITED;
 };
 
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index 74534e4..ca449d3 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -61,7 +61,7 @@
 }
 #endif
 
-void GrRenderTargetOpList::prepareOps(GrOpFlushState* flushState) {
+void GrRenderTargetOpList::onPrepare(GrOpFlushState* flushState) {
     SkASSERT(fTarget.get()->priv().peekRenderTarget());
     SkASSERT(this->isClosed());
 #ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
@@ -130,7 +130,7 @@
 // TODO: this is where GrOp::renderTarget is used (which is fine since it
 // is at flush time). However, we need to store the RenderTargetProxy in the
 // Ops and instantiate them here.
-bool GrRenderTargetOpList::executeOps(GrOpFlushState* flushState) {
+bool GrRenderTargetOpList::onExecute(GrOpFlushState* flushState) {
     if (0 == fRecordedOps.count()) {
         return false;
     }
diff --git a/src/gpu/GrRenderTargetOpList.h b/src/gpu/GrRenderTargetOpList.h
index f162fcc..b62cee5 100644
--- a/src/gpu/GrRenderTargetOpList.h
+++ b/src/gpu/GrRenderTargetOpList.h
@@ -65,8 +65,8 @@
      * Together these two functions flush all queued up draws to GrCommandBuffer. The return value
-     * of executeOps() indicates whether any commands were actually issued to the GPU.
+     * of onExecute() indicates whether any commands were actually issued to the GPU.
      */
-    void prepareOps(GrOpFlushState* flushState) override;
-    bool executeOps(GrOpFlushState* flushState) override;
+    void onPrepare(GrOpFlushState* flushState) override;
+    bool onExecute(GrOpFlushState* flushState) override;
 
     uint32_t addOp(std::unique_ptr<GrOp> op, const GrCaps& caps) {
         this->recordOp(std::move(op), caps, nullptr, nullptr);
diff --git a/src/gpu/GrTextureOpList.cpp b/src/gpu/GrTextureOpList.cpp
index eb2380f..f36cc35 100644
--- a/src/gpu/GrTextureOpList.cpp
+++ b/src/gpu/GrTextureOpList.cpp
@@ -45,7 +45,7 @@
 
 #endif
 
-void GrTextureOpList::prepareOps(GrOpFlushState* flushState) {
+void GrTextureOpList::onPrepare(GrOpFlushState* flushState) {
     SkASSERT(this->isClosed());
 
     // Loop over the ops that haven't yet generated their geometry
@@ -57,7 +57,7 @@
     }
 }
 
-bool GrTextureOpList::executeOps(GrOpFlushState* flushState) {
+bool GrTextureOpList::onExecute(GrOpFlushState* flushState) {
     if (0 == fRecordedOps.count()) {
         return false;
     }
diff --git a/src/gpu/GrTextureOpList.h b/src/gpu/GrTextureOpList.h
index 3b57ba7..5f1e331 100644
--- a/src/gpu/GrTextureOpList.h
+++ b/src/gpu/GrTextureOpList.h
@@ -38,8 +38,8 @@
      * Together these two functions flush all queued ops to GrGpuCommandBuffer. The return value
-     * of executeOps() indicates whether any commands were actually issued to the GPU.
+     * of onExecute() indicates whether any commands were actually issued to the GPU.
      */
-    void prepareOps(GrOpFlushState* flushState) override;
-    bool executeOps(GrOpFlushState* flushState) override;
+    void onPrepare(GrOpFlushState* flushState) override;
+    bool onExecute(GrOpFlushState* flushState) override;
 
     /**
      * Copies a pixel rectangle from one surface to another. This call may finalize