Rename GrBatch to GrOp

Change-Id: I27b6324f8040899fafeda23ca524bc54a4dbf090
Reviewed-on: https://skia-review.googlesource.com/5392
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Brian Osman <brianosman@google.com>
diff --git a/src/gpu/GrAppliedClip.h b/src/gpu/GrAppliedClip.h
index 3e98c6c..27fbde0 100644
--- a/src/gpu/GrAppliedClip.h
+++ b/src/gpu/GrAppliedClip.h
@@ -15,7 +15,7 @@
 
 /**
  * Produced by GrClip. It provides a set of modifications to the drawing state that are used to
- * create the final GrPipeline for a GrBatch.
+ * create the final GrPipeline for a GrOp.
  */
 class GrAppliedClip : public SkNoncopyable {
 public:
diff --git a/src/gpu/GrAuditTrail.cpp b/src/gpu/GrAuditTrail.cpp
index 93694cb..8f6f566 100644
--- a/src/gpu/GrAuditTrail.cpp
+++ b/src/gpu/GrAuditTrail.cpp
@@ -6,11 +6,11 @@
  */
 
 #include "GrAuditTrail.h"
-#include "batches/GrBatch.h"
+#include "batches/GrOp.h"
 
 const int GrAuditTrail::kGrAuditTrailInvalidID = -1;
 
-void GrAuditTrail::addBatch(const GrBatch* batch) {
+void GrAuditTrail::addBatch(const GrOp* batch) {
     SkASSERT(fEnabled);
     Batch* auditBatch = new Batch;
     fBatchPool.emplace_back(auditBatch);
@@ -51,7 +51,7 @@
     fBatchList.emplace_back(batchNode);
 }
 
-void GrAuditTrail::batchingResultCombined(const GrBatch* consumer, const GrBatch* consumed) {
+void GrAuditTrail::batchingResultCombined(const GrOp* consumer, const GrOp* consumed) {
     // Look up the batch we are going to glom onto
     int* indexPtr = fIDLookup.find(consumer->uniqueID());
     SkASSERT(indexPtr);
diff --git a/src/gpu/GrBatchTest.h b/src/gpu/GrBatchTest.h
index 32e8e28..c6cfa0d 100644
--- a/src/gpu/GrBatchTest.h
+++ b/src/gpu/GrBatchTest.h
@@ -18,7 +18,7 @@
 
 /*
  * This file defines some macros for testing batches, and also declares functions / objects which
- * are generally useful for GrBatch testing
+ * are generally useful for GrOp testing
  */
 
 // Batches should define test functions using DRAW_BATCH_TEST_DEFINE.  The other macros defined
diff --git a/src/gpu/GrMesh.h b/src/gpu/GrMesh.h
index 964e0b4..5d1ce6f 100644
--- a/src/gpu/GrMesh.h
+++ b/src/gpu/GrMesh.h
@@ -35,7 +35,7 @@
 };
 
 /**
- * Used to communicate index and vertex buffers, counts, and offsets for a draw from GrBatch to
+ * Used to communicate index and vertex buffers, counts, and offsets for a draw from GrOp to
  * GrGpu. It also holds the primitive type for the draw. TODO: Consider moving ownership of this
  * and draw-issuing responsibility to GrPrimitiveProcessor. The rest of the vertex info lives there
  * already (stride, attribute mappings).
diff --git a/src/gpu/GrOvalRenderer.cpp b/src/gpu/GrOvalRenderer.cpp
index ea126b1..b5fa7fd 100644
--- a/src/gpu/GrOvalRenderer.cpp
+++ b/src/gpu/GrOvalRenderer.cpp
@@ -26,7 +26,7 @@
 #include "glsl/GrGLSLUniformHandler.h"
 #include "glsl/GrGLSLUtil.h"
 
-// TODO(joshualitt) - Break this file up during GrBatch post implementation cleanup
+// TODO(joshualitt) - Break this file up during GrOp post implementation cleanup
 
 namespace {
 
@@ -599,7 +599,7 @@
 
 class CircleBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     /** Optional extra params to render a partial arc rather than a full circle. */
     struct ArcParams {
@@ -1098,7 +1098,7 @@
         target->draw(gp.get(), mesh);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         CircleBatch* that = t->cast<CircleBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
@@ -1150,7 +1150,7 @@
 
 class EllipseBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
     static GrDrawBatch* Create(GrColor color, const SkMatrix& viewMatrix, const SkRect& ellipse,
                                const SkStrokeRec& stroke) {
         SkASSERT(viewMatrix.rectStaysRect());
@@ -1340,7 +1340,7 @@
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         EllipseBatch* that = t->cast<EllipseBatch>();
 
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
@@ -1381,7 +1381,7 @@
 
 class DIEllipseBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static GrDrawBatch* Create(GrColor color,
                                const SkMatrix& viewMatrix,
@@ -1558,7 +1558,7 @@
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         DIEllipseBatch* that = t->cast<DIEllipseBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
@@ -1723,7 +1723,7 @@
 
 class RRectCircleRendererBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     // A devStrokeWidth <= 0 indicates a fill only. If devStrokeWidth > 0 then strokeOnly indicates
     // whether the rrect is only stroked or stroked and filled.
@@ -2022,7 +2022,7 @@
         target->draw(gp.get(), mesh);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         RRectCircleRendererBatch* that = t->cast<RRectCircleRendererBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
@@ -2083,7 +2083,7 @@
 
 class RRectEllipseRendererBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     // If devStrokeWidths values are <= 0 indicates then fill only. Otherwise, strokeOnly indicates
     // whether the rrect is only stroked or stroked and filled.
@@ -2275,7 +2275,7 @@
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         RRectEllipseRendererBatch* that = t->cast<RRectEllipseRendererBatch>();
 
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/GrPipeline.cpp b/src/gpu/GrPipeline.cpp
index c58e0ee..0e4cac5 100644
--- a/src/gpu/GrPipeline.cpp
+++ b/src/gpu/GrPipeline.cpp
@@ -16,7 +16,7 @@
 #include "GrRenderTargetPriv.h"
 #include "GrXferProcessor.h"
 
-#include "batches/GrBatch.h"
+#include "batches/GrOp.h"
 
 GrPipeline* GrPipeline::CreateAt(void* memory, const CreateArgs& args,
                                  GrXPOverridesForBatch* overrides) {
diff --git a/src/gpu/GrPipeline.h b/src/gpu/GrPipeline.h
index 2c7c779..0b0b898 100644
--- a/src/gpu/GrPipeline.h
+++ b/src/gpu/GrPipeline.h
@@ -26,10 +26,10 @@
 #include "effects/GrPorterDuffXferProcessor.h"
 #include "effects/GrSimpleTextureEffect.h"
 
-class GrBatch;
-class GrRenderTargetContext;
 class GrDeviceCoordTexture;
+class GrOp;
 class GrPipelineBuilder;
+class GrRenderTargetContext;
 
 struct GrBatchToXPOverrides {
     GrBatchToXPOverrides()
@@ -81,7 +81,7 @@
     static bool AreEqual(const GrPipeline& a, const GrPipeline& b);
 
     /**
-     * Allows a GrBatch subclass to determine whether two GrBatches can combine. This is a stricter
+     * Allows a GrOp subclass to determine whether two GrOps can combine. This is a stricter
      * test than isEqual because it also considers blend barriers when the two batches' bounds
      * overlap
      */
diff --git a/src/gpu/GrPipelineBuilder.cpp b/src/gpu/GrPipelineBuilder.cpp
index 864d6f1..fac5752 100644
--- a/src/gpu/GrPipelineBuilder.cpp
+++ b/src/gpu/GrPipelineBuilder.cpp
@@ -12,7 +12,7 @@
 #include "GrPipeline.h"
 #include "GrProcOptInfo.h"
 #include "GrXferProcessor.h"
-#include "batches/GrBatch.h"
+#include "batches/GrOp.h"
 #include "effects/GrPorterDuffXferProcessor.h"
 
 GrPipelineBuilder::GrPipelineBuilder()
diff --git a/src/gpu/GrPrimitiveProcessor.h b/src/gpu/GrPrimitiveProcessor.h
index cb23897..addc1c1 100644
--- a/src/gpu/GrPrimitiveProcessor.h
+++ b/src/gpu/GrPrimitiveProcessor.h
@@ -58,7 +58,7 @@
 
 /*
  * This class allows the GrPipeline to communicate information about the pipeline to a
- * GrBatch which should be forwarded to the GrPrimitiveProcessor(s) created by the batch.
+ * GrOp which should be forwarded to the GrPrimitiveProcessor(s) created by the batch.
  * These are not properly part of the pipeline because they assume the specific inputs
  * that the batch provided when it created the pipeline. Identical pipelines may be
  * created by different batches with different input assumptions and therefore different
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index 4cba6b9..f6645e3 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -20,7 +20,7 @@
 #include "GrResourceProvider.h"
 #include "SkSurfacePriv.h"
 
-#include "batches/GrBatch.h"
+#include "batches/GrOp.h"
 #include "batches/GrClearBatch.h"
 #include "batches/GrDrawAtlasBatch.h"
 #include "batches/GrDrawVerticesBatch.h"
@@ -274,7 +274,7 @@
         if (!this->accessRenderTarget()) {
             return;
         }
-        sk_sp<GrBatch> batch(GrClearBatch::Make(clip, color, this->accessRenderTarget()));
+        sk_sp<GrOp> batch(GrClearBatch::Make(clip, color, this->accessRenderTarget()));
         if (!batch) {
             return;
         }
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index 7b16a55..ad20771 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -177,7 +177,7 @@
     }
 }
 
-// TODO: this is where GrBatch::renderTarget is used (which is fine since it
+// TODO: this is where GrOp::renderTarget is used (which is fine since it
 // is at flush time). However, we need to store the RenderTargetProxy in the
 // Batches and instantiate them here.
 bool GrRenderTargetOpList::drawBatches(GrBatchFlushState* flushState) {
@@ -242,7 +242,7 @@
     }
 }
 
-static void batch_bounds(SkRect* bounds, const GrBatch* batch) {
+static void batch_bounds(SkRect* bounds, const GrOp* batch) {
     *bounds = batch->bounds();
     if (batch->hasZeroArea()) {
         if (batch->hasAABloat()) {
@@ -388,19 +388,19 @@
         return;
     }
 
-    GrBatch* batch = GrStencilPathBatch::Create(viewMatrix,
-                                                useHWAA,
-                                                path->getFillType(),
-                                                appliedClip.hasStencilClip(),
-                                                stencilAttachment->bits(),
-                                                appliedClip.scissorState(),
-                                                renderTargetContext->accessRenderTarget(),
-                                                path);
+    GrOp* batch = GrStencilPathBatch::Create(viewMatrix,
+                                             useHWAA,
+                                             path->getFillType(),
+                                             appliedClip.hasStencilClip(),
+                                             stencilAttachment->bits(),
+                                             appliedClip.scissorState(),
+                                             renderTargetContext->accessRenderTarget(),
+                                             path);
     this->recordBatch(batch, appliedClip.clippedDrawBounds());
     batch->unref();
 }
 
-void GrRenderTargetOpList::addBatch(sk_sp<GrBatch> batch) {
+void GrRenderTargetOpList::addBatch(sk_sp<GrOp> batch) {
     this->recordBatch(batch.get(), batch->bounds());
 }
 
@@ -427,7 +427,7 @@
     // Currently this just inserts a discard batch. However, once in MDB this can remove all the
     // previously recorded batches and change the load op to discard.
     if (this->caps()->discardRenderTargetSupport()) {
-        GrBatch* batch = new GrDiscardBatch(renderTarget);
+        GrOp* batch = new GrDiscardBatch(renderTarget);
         this->recordBatch(batch, batch->bounds());
         batch->unref();
     }
@@ -439,7 +439,7 @@
                                        GrSurface* src,
                                        const SkIRect& srcRect,
                                        const SkIPoint& dstPoint) {
-    GrBatch* batch = GrCopySurfaceBatch::Create(dst, src, srcRect, dstPoint);
+    GrOp* batch = GrCopySurfaceBatch::Create(dst, src, srcRect, dstPoint);
     if (!batch) {
         return false;
     }
@@ -466,7 +466,7 @@
     out->fBottom = SkTMax(a.fBottom, b.fBottom);
 }
 
-GrBatch* GrRenderTargetOpList::recordBatch(GrBatch* batch, const SkRect& clippedBounds) {
+GrOp* GrRenderTargetOpList::recordBatch(GrOp* batch, const SkRect& clippedBounds) {
     // A closed GrOpList should never receive new/more batches
     SkASSERT(!this->isClosed());
 
@@ -475,31 +475,31 @@
     // 2) intersect with something
     // 3) find a 'blocker'
     GR_AUDIT_TRAIL_ADDBATCH(fAuditTrail, batch);
-    GrBATCH_INFO("Re-Recording (%s, B%u)\n"
-        "\tBounds LRTB (%f, %f, %f, %f)\n",
-        batch->name(),
-        batch->uniqueID(),
-        batch->bounds().fLeft, batch->bounds().fRight,
-        batch->bounds().fTop, batch->bounds().fBottom);
-    GrBATCH_INFO(SkTabString(batch->dumpInfo(), 1).c_str());
-    GrBATCH_INFO("\tClipped Bounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
-                 clippedBounds.fLeft, clippedBounds.fTop, clippedBounds.fRight,
-                 clippedBounds.fBottom);
-    GrBATCH_INFO("\tOutcome:\n");
+    GrOP_INFO("Re-Recording (%s, B%u)\n"
+              "\tBounds LRTB (%f, %f, %f, %f)\n",
+               batch->name(),
+               batch->uniqueID(),
+               batch->bounds().fLeft, batch->bounds().fRight,
+               batch->bounds().fTop, batch->bounds().fBottom);
+    GrOP_INFO(SkTabString(batch->dumpInfo(), 1).c_str());
+    GrOP_INFO("\tClipped Bounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+              clippedBounds.fLeft, clippedBounds.fTop, clippedBounds.fRight,
+              clippedBounds.fBottom);
+    GrOP_INFO("\tOutcome:\n");
     int maxCandidates = SkTMin(fMaxBatchLookback, fRecordedBatches.count());
     if (maxCandidates) {
         int i = 0;
         while (true) {
-            GrBatch* candidate = fRecordedBatches.fromBack(i).fBatch.get();
+            GrOp* candidate = fRecordedBatches.fromBack(i).fBatch.get();
             // We cannot continue to search backwards if the render target changes
             if (candidate->renderTargetUniqueID() != batch->renderTargetUniqueID()) {
-                GrBATCH_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
-                    candidate->name(), candidate->uniqueID());
+                GrOP_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
+                          candidate->name(), candidate->uniqueID());
                 break;
             }
             if (candidate->combineIfPossible(batch, *this->caps())) {
-                GrBATCH_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
-                    candidate->uniqueID());
+                GrOP_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
+                          candidate->uniqueID());
                 GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(fAuditTrail, candidate, batch);
                 join(&fRecordedBatches.fromBack(i).fClippedBounds,
                      fRecordedBatches.fromBack(i).fClippedBounds, clippedBounds);
@@ -508,18 +508,18 @@
             // Stop going backwards if we would cause a painter's order violation.
             const SkRect& candidateBounds = fRecordedBatches.fromBack(i).fClippedBounds;
             if (!can_reorder(candidateBounds, clippedBounds)) {
-                GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
-                    candidate->uniqueID());
+                GrOP_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
+                          candidate->uniqueID());
                 break;
             }
             ++i;
             if (i == maxCandidates) {
-                GrBATCH_INFO("\t\tReached max lookback or beginning of batch array %d\n", i);
+                GrOP_INFO("\t\tReached max lookback or beginning of batch array %d\n", i);
                 break;
             }
         }
     } else {
-        GrBATCH_INFO("\t\tFirstBatch\n");
+        GrOP_INFO("\t\tFirstBatch\n");
     }
     GR_AUDIT_TRAIL_BATCHING_RESULT_NEW(fAuditTrail, batch);
     fRecordedBatches.emplace_back(RecordedBatch{sk_ref_sp(batch), clippedBounds});
@@ -532,16 +532,16 @@
         return;
     }
     for (int i = 0; i < fRecordedBatches.count() - 2; ++i) {
-        GrBatch* batch = fRecordedBatches[i].fBatch.get();
+        GrOp* batch = fRecordedBatches[i].fBatch.get();
         const SkRect& batchBounds = fRecordedBatches[i].fClippedBounds;
         int maxCandidateIdx = SkTMin(i + fMaxBatchLookahead, fRecordedBatches.count() - 1);
         int j = i + 1;
         while (true) {
-            GrBatch* candidate = fRecordedBatches[j].fBatch.get();
+            GrOp* candidate = fRecordedBatches[j].fBatch.get();
             // We cannot continue to search if the render target changes
             if (candidate->renderTargetUniqueID() != batch->renderTargetUniqueID()) {
-                GrBATCH_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
-                             candidate->name(), candidate->uniqueID());
+                GrOP_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
+                          candidate->name(), candidate->uniqueID());
                 break;
             }
             if (j == i +1) {
@@ -549,8 +549,8 @@
                 // via backwards combining in recordBatch.
                 SkASSERT(!batch->combineIfPossible(candidate, *this->caps()));
             } else if (batch->combineIfPossible(candidate, *this->caps())) {
-                GrBATCH_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
-                             candidate->uniqueID());
+                GrOP_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
+                          candidate->uniqueID());
                 GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(fAuditTrail, batch, candidate);
                 fRecordedBatches[j].fBatch = std::move(fRecordedBatches[i].fBatch);
                 join(&fRecordedBatches[j].fClippedBounds, fRecordedBatches[j].fClippedBounds,
@@ -560,13 +560,13 @@
             // Stop going traversing if we would cause a painter's order violation.
             const SkRect& candidateBounds = fRecordedBatches[j].fClippedBounds;
             if (!can_reorder(candidateBounds, batchBounds)) {
-                GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
-                             candidate->uniqueID());
+                GrOP_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
+                          candidate->uniqueID());
                 break;
             }
             ++j;
             if (j > maxCandidateIdx) {
-                GrBATCH_INFO("\t\tReached max lookahead or end of batch array %d\n", i);
+                GrOP_INFO("\t\tReached max lookahead or end of batch array %d\n", i);
                 break;
             }
         }
@@ -578,7 +578,7 @@
 void GrRenderTargetOpList::clearStencilClip(const GrFixedClip& clip,
                                             bool insideStencilMask,
                                             GrRenderTarget* rt) {
-    GrBatch* batch = new GrClearStencilClipBatch(clip, insideStencilMask, rt);
+    GrOp* batch = new GrClearStencilClipBatch(clip, insideStencilMask, rt);
     this->recordBatch(batch, batch->bounds());
     batch->unref();
 }
diff --git a/src/gpu/GrRenderTargetOpList.h b/src/gpu/GrRenderTargetOpList.h
index 2f4a347..aad1417 100644
--- a/src/gpu/GrRenderTargetOpList.h
+++ b/src/gpu/GrRenderTargetOpList.h
@@ -28,12 +28,12 @@
 #include "SkTypes.h"
 
 class GrAuditTrail;
-class GrBatch;
 class GrClearBatch;
 class GrClip;
 class GrCaps;
 class GrPath;
 class GrDrawPathBatchBase;
+class GrOp;
 class GrPipelineBuilder;
 class GrRenderTargetProxy;
 
@@ -84,7 +84,7 @@
 
     void drawBatch(const GrPipelineBuilder&, GrRenderTargetContext*, const GrClip&, GrDrawBatch*);
 
-    void addBatch(sk_sp<GrBatch>);
+    void addBatch(sk_sp<GrOp>);
 
     /**
      * Draws the path into user stencil bits. Upon return, all user stencil values
@@ -134,7 +134,7 @@
 
     // Returns the batch that the input batch was combined with or the input batch if it wasn't
     // combined.
-    GrBatch* recordBatch(GrBatch*, const SkRect& clippedBounds);
+    GrOp* recordBatch(GrOp*, const SkRect& clippedBounds);
     void forwardCombine();
 
     // Makes a copy of the dst if it is necessary for the draw. Returns false if a copy is required
@@ -151,7 +151,7 @@
     void clearStencilClip(const GrFixedClip&, bool insideStencilMask, GrRenderTarget*);
 
     struct RecordedBatch {
-        sk_sp<GrBatch> fBatch;
-        SkRect         fClippedBounds;
+        sk_sp<GrOp> fBatch;
+        SkRect      fClippedBounds;
     };
     SkSTArray<256, RecordedBatch, true>             fRecordedBatches;
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index abcd699..70535e2 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -97,7 +97,7 @@
     enum Flags {
         /** If the caller intends to do direct reads/writes to/from the CPU then this flag must be
          *  set when accessing resources during a GrOpList flush. This includes the execution of
-         *  GrBatch objects. The reason is that these memory operations are done immediately and
+         *  GrOp objects. The reason is that these memory operations are done immediately and
          *  will occur out of order WRT the operations being flushed.
          *  Make this automatic: https://bug.skia.org/4156
          */
@@ -129,7 +129,7 @@
     }
 
     /**  Returns a GrBatchAtlas. This function can be called anywhere, but the returned atlas should
-     *   only be used inside of GrBatch::generateGeometry
+     *   only be used inside of GrOp::generateGeometry
      *   @param GrPixelConfig    The pixel config which this atlas will store
      *   @param width            width in pixels of the atlas
      *   @param height           height in pixels of the atlas
diff --git a/src/gpu/GrTextureOpList.cpp b/src/gpu/GrTextureOpList.cpp
index 974ee9b..ffffa0b 100644
--- a/src/gpu/GrTextureOpList.cpp
+++ b/src/gpu/GrTextureOpList.cpp
@@ -82,7 +82,7 @@
                                   GrSurface* src,
                                   const SkIRect& srcRect,
                                   const SkIPoint& dstPoint) {
-    GrBatch* batch = GrCopySurfaceBatch::Create(dst, src, srcRect, dstPoint);
+    GrOp* batch = GrCopySurfaceBatch::Create(dst, src, srcRect, dstPoint);
     if (!batch) {
         return false;
     }
@@ -95,18 +95,18 @@
     return true;
 }
 
-void GrTextureOpList::recordBatch(GrBatch* batch) {
+void GrTextureOpList::recordBatch(GrOp* batch) {
     // A closed GrOpList should never receive new/more batches
     SkASSERT(!this->isClosed());
 
     GR_AUDIT_TRAIL_ADDBATCH(fAuditTrail, batch);
-    GrBATCH_INFO("Re-Recording (%s, B%u)\n"
-        "\tBounds LRTB (%f, %f, %f, %f)\n",
-        batch->name(),
-        batch->uniqueID(),
-        batch->bounds().fLeft, batch->bounds().fRight,
-        batch->bounds().fTop, batch->bounds().fBottom);
+    GrOP_INFO("Re-Recording (%s, B%u)\n"
+              "\tBounds LRTB (%f, %f, %f, %f)\n",
+              batch->name(),
+              batch->uniqueID(),
+              batch->bounds().fLeft, batch->bounds().fRight,
+              batch->bounds().fTop, batch->bounds().fBottom);
-    GrBATCH_INFO(SkTabString(batch->dumpInfo(), 1).c_str());
+    GrOP_INFO(SkTabString(batch->dumpInfo(), 1).c_str());
     GR_AUDIT_TRAIL_BATCHING_RESULT_NEW(fAuditTrail, batch);
 
     fRecordedBatches.emplace_back(sk_ref_sp(batch));
diff --git a/src/gpu/GrTextureOpList.h b/src/gpu/GrTextureOpList.h
index 7674184..934c4b0 100644
--- a/src/gpu/GrTextureOpList.h
+++ b/src/gpu/GrTextureOpList.h
@@ -13,8 +13,8 @@
 #include "SkTArray.h"
 
 class GrAuditTrail;
-class GrBatch;
 class GrGpu;
+class GrOp;
 class GrTextureProxy;
 struct SkIPoint;
 struct SkIRect;
@@ -60,10 +60,10 @@
     SkDEBUGCODE(void dump() const override;)
 
 private:
-    void recordBatch(GrBatch*);
+    void recordBatch(GrOp*);
 
-    SkSTArray<2, sk_sp<GrBatch>, true> fRecordedBatches;
-    GrGpu*                             fGpu;
+    SkSTArray<2, sk_sp<GrOp>, true> fRecordedBatches;
+    GrGpu*                          fGpu;
 
     typedef GrOpList INHERITED;
 };
diff --git a/src/gpu/SkGrPriv.h b/src/gpu/SkGrPriv.h
index bb41aad..44ff8e7 100644
--- a/src/gpu/SkGrPriv.h
+++ b/src/gpu/SkGrPriv.h
@@ -67,7 +67,7 @@
                                    GrPaint* grPaint);
 
 /** Blends the SkPaint's shader (or color if no shader) with the color which specified via a
-    GrBatch's GrPrimitiveProcesssor. Currently there is a bool param to indicate whether the
+    GrOp's GrPrimitiveProcesssor. Currently there is a bool param to indicate whether the
     primitive color is the dst or src color to the blend in order to work around differences between
     drawVertices and drawAtlas. */
 bool SkPaintToGrPaintWithXfermode(GrContext* context,
diff --git a/src/gpu/batches/GrAAConvexPathRenderer.cpp b/src/gpu/batches/GrAAConvexPathRenderer.cpp
index 1be84c7..8f10050 100644
--- a/src/gpu/batches/GrAAConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAAConvexPathRenderer.cpp
@@ -734,7 +734,7 @@
 
 class AAConvexPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
     AAConvexPathBatch(GrColor color, const SkMatrix& viewMatrix, const SkPath& path)
         : INHERITED(ClassID()) {
         fGeoData.emplace_back(Geometry{color, viewMatrix, path});
@@ -932,7 +932,7 @@
         }
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AAConvexPathBatch* that = t->cast<AAConvexPathBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
index e911927..8ece0c9 100644
--- a/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
+++ b/src/gpu/batches/GrAADistanceFieldPathRenderer.cpp
@@ -120,7 +120,7 @@
 
 class AADistanceFieldPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     typedef GrAADistanceFieldPathRenderer::ShapeData ShapeData;
     typedef SkTDynamicHash<ShapeData, ShapeData::Key> ShapeCache;
@@ -480,7 +480,7 @@
     const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
     bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AADistanceFieldPathBatch* that = t->cast<AADistanceFieldPathBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAAFillRectBatch.cpp b/src/gpu/batches/GrAAFillRectBatch.cpp
index 46a5d50..c8804da 100644
--- a/src/gpu/batches/GrAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrAAFillRectBatch.cpp
@@ -159,7 +159,7 @@
 }
 class AAFillRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AAFillRectBatch(GrColor color,
                     const SkMatrix& viewMatrix,
@@ -266,7 +266,7 @@
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AAFillRectBatch* that = t->cast<AAFillRectBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAAFillRectBatch.h b/src/gpu/batches/GrAAFillRectBatch.h
index 1dbec99..b4fa232 100644
--- a/src/gpu/batches/GrAAFillRectBatch.h
+++ b/src/gpu/batches/GrAAFillRectBatch.h
@@ -10,9 +10,9 @@
 
 #include "GrColor.h"
 
-class GrBatch;
 class GrDrawBatch;
+class GrOp;
 class SkMatrix;
 struct SkRect;
 
 namespace GrAAFillRectBatch {
diff --git a/src/gpu/batches/GrAAHairLinePathRenderer.cpp b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
index b2ad9ba..d7b3d69 100644
--- a/src/gpu/batches/GrAAHairLinePathRenderer.cpp
+++ b/src/gpu/batches/GrAAHairLinePathRenderer.cpp
@@ -677,7 +677,7 @@
 
 class AAHairlineBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AAHairlineBatch(GrColor color,
                     uint8_t coverage,
@@ -732,7 +732,7 @@
     typedef SkTArray<int, true> IntArray;
     typedef SkTArray<float, true> FloatArray;
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AAHairlineBatch* that = t->cast<AAHairlineBatch>();
 
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
index f8516b9..10f1d72 100644
--- a/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
+++ b/src/gpu/batches/GrAALinearizingConvexPathRenderer.cpp
@@ -125,7 +125,7 @@
 
 class AAFlatteningConvexPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AAFlatteningConvexPathBatch(GrColor color,
                                 const SkMatrix& viewMatrix,
@@ -285,7 +285,7 @@
         sk_free(indices);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AAFlatteningConvexPathBatch* that = t->cast<AAFlatteningConvexPathBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.cpp b/src/gpu/batches/GrAAStrokeRectBatch.cpp
index 7f87ad6..8f13adc 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrAAStrokeRectBatch.cpp
@@ -118,7 +118,7 @@
 
 class AAStrokeRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AAStrokeRectBatch(GrColor color, const SkMatrix& viewMatrix,
                       const SkRect& devOutside, const SkRect& devInside)
@@ -204,7 +204,7 @@
     const SkMatrix& viewMatrix() const { return fViewMatrix; }
     bool miterStroke() const { return fMiterStroke; }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+    bool onCombineIfPossible(GrOp* t, const GrCaps&) override;
 
     void generateAAStrokeRectGeometry(void* vertices,
                                       size_t offset,
@@ -402,7 +402,7 @@
     }
 }
 
-bool AAStrokeRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool AAStrokeRectBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
     AAStrokeRectBatch* that = t->cast<AAStrokeRectBatch>();
 
     if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.h b/src/gpu/batches/GrAAStrokeRectBatch.h
index 964cc5b..73020d5 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.h
+++ b/src/gpu/batches/GrAAStrokeRectBatch.h
@@ -10,7 +10,6 @@
 
 #include "GrColor.h"
 
-class GrBatch;
 class GrDrawBatch;
 class GrResourceProvider;
 class SkMatrix;
diff --git a/src/gpu/batches/GrAnalyticRectBatch.cpp b/src/gpu/batches/GrAnalyticRectBatch.cpp
index 655644f..5e196cc 100644
--- a/src/gpu/batches/GrAnalyticRectBatch.cpp
+++ b/src/gpu/batches/GrAnalyticRectBatch.cpp
@@ -236,7 +236,7 @@
 
 class AnalyticRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     AnalyticRectBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
                       const SkRect& croppedRect, const SkRect& bounds)
@@ -357,7 +357,7 @@
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         AnalyticRectBatch* that = t->cast<AnalyticRectBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAtlasTextBatch.cpp b/src/gpu/batches/GrAtlasTextBatch.cpp
index cf4ca24..df81e2c 100644
--- a/src/gpu/batches/GrAtlasTextBatch.cpp
+++ b/src/gpu/batches/GrAtlasTextBatch.cpp
@@ -183,7 +183,7 @@
     flushInfo->fGlyphsToFlush = 0;
 }
 
-bool GrAtlasTextBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrAtlasTextBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
     GrAtlasTextBatch* that = t->cast<GrAtlasTextBatch>();
     if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                 that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrAtlasTextBatch.h b/src/gpu/batches/GrAtlasTextBatch.h
index b3b88df..ca715a7 100644
--- a/src/gpu/batches/GrAtlasTextBatch.h
+++ b/src/gpu/batches/GrAtlasTextBatch.h
@@ -15,7 +15,7 @@
 
 class GrAtlasTextBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static const int kVerticesPerGlyph = GrAtlasTextBlob::kVerticesPerGlyph;
     static const int kIndicesPerGlyph = 6;
@@ -152,7 +152,7 @@
     bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
     int numGlyphs() const { return fBatch.fNumGlyphs; }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override;
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override;
 
     // TODO just use class params
     // TODO trying to figure out why lcd is so whack
diff --git a/src/gpu/batches/GrClearBatch.h b/src/gpu/batches/GrClearBatch.h
index 24905d3..f187048 100644
--- a/src/gpu/batches/GrClearBatch.h
+++ b/src/gpu/batches/GrClearBatch.h
@@ -8,16 +8,16 @@
 #ifndef GrClearBatch_DEFINED
 #define GrClearBatch_DEFINED
 
-#include "GrBatch.h"
 #include "GrBatchFlushState.h"
 #include "GrFixedClip.h"
 #include "GrGpu.h"
 #include "GrGpuCommandBuffer.h"
+#include "GrOp.h"
 #include "GrRenderTarget.h"
 
-class GrClearBatch final : public GrBatch {
+class GrClearBatch final : public GrOp {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static sk_sp<GrClearBatch> Make(const GrFixedClip& clip, GrColor color, GrRenderTarget* rt) {
         sk_sp<GrClearBatch> batch(new GrClearBatch(clip, color, rt));
@@ -68,7 +68,7 @@
         fRenderTarget.reset(rt);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         // This could be much more complicated. Currently we look at cases where the new clear
         // contains the old clear, or when the new clear is a subset of the old clear and is the
         // same color.
@@ -105,7 +105,7 @@
     GrColor                                                 fColor;
     GrPendingIOResource<GrRenderTarget, kWrite_GrIOType>    fRenderTarget;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrClearStencilClipBatch.h b/src/gpu/batches/GrClearStencilClipBatch.h
index 0f26173..87c12f0 100644
--- a/src/gpu/batches/GrClearStencilClipBatch.h
+++ b/src/gpu/batches/GrClearStencilClipBatch.h
@@ -8,16 +8,16 @@
 #ifndef GrClearStencilClipBatch_DEFINED
 #define GrClearStencilClipBatch_DEFINED
 
-#include "GrBatch.h"
 #include "GrBatchFlushState.h"
 #include "GrFixedClip.h"
 #include "GrGpu.h"
 #include "GrGpuCommandBuffer.h"
+#include "GrOp.h"
 #include "GrRenderTarget.h"
 
-class GrClearStencilClipBatch final : public GrBatch {
+class GrClearStencilClipBatch final : public GrOp {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     GrClearStencilClipBatch(const GrFixedClip& clip, bool insideStencilMask, GrRenderTarget* rt)
         : INHERITED(ClassID())
@@ -49,7 +49,7 @@
     }
 
 private:
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override { return false; }
 
     void onPrepare(GrBatchFlushState*) override {}
 
@@ -61,7 +61,7 @@
     const bool                                              fInsideStencilMask;
     GrPendingIOResource<GrRenderTarget, kWrite_GrIOType>    fRenderTarget;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrCopySurfaceBatch.cpp b/src/gpu/batches/GrCopySurfaceBatch.cpp
index 7246098..9aa46e6 100644
--- a/src/gpu/batches/GrCopySurfaceBatch.cpp
+++ b/src/gpu/batches/GrCopySurfaceBatch.cpp
@@ -59,7 +59,7 @@
     return !clippedSrcRect->isEmpty();
 }
 
-GrBatch* GrCopySurfaceBatch::Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+GrOp* GrCopySurfaceBatch::Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
     SkASSERT(dst);
     SkASSERT(src);
diff --git a/src/gpu/batches/GrCopySurfaceBatch.h b/src/gpu/batches/GrCopySurfaceBatch.h
index 3d9fc78..a808b2f 100644
--- a/src/gpu/batches/GrCopySurfaceBatch.h
+++ b/src/gpu/batches/GrCopySurfaceBatch.h
@@ -8,14 +8,14 @@
 #ifndef GrCopySurfaceBatch_DEFINED
 #define GrCopySurfaceBatch_DEFINED
 
-#include "GrBatch.h"
 #include "GrBatchFlushState.h"
 #include "GrGpu.h"
+#include "GrOp.h"
 #include "GrRenderTarget.h"
 
-class GrCopySurfaceBatch final : public GrBatch {
+class GrCopySurfaceBatch final : public GrOp {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     /** This should not really be exposed as Create() will apply this clipping, but there is
      *  currently a workaround in GrContext::copySurface() for non-render target dsts that relies
@@ -27,7 +27,7 @@
                                        SkIRect* clippedSrcRect,
                                        SkIPoint* clippedDstPoint);
 
-    static GrBatch* Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+    static GrOp* Create(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint);
 
     const char* name() const override { return "CopySurface"; }
@@ -64,7 +64,7 @@
         this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
     }
 
-    bool onCombineIfPossible(GrBatch* that, const GrCaps& caps) override { return false; }
+    bool onCombineIfPossible(GrOp* that, const GrCaps& caps) override { return false; }
 
     void onPrepare(GrBatchFlushState*) override {}
 
@@ -83,7 +83,7 @@
     SkIRect                                         fSrcRect;
     SkIPoint                                        fDstPoint;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrDefaultPathRenderer.cpp b/src/gpu/batches/GrDefaultPathRenderer.cpp
index 21e2289..5faf81a 100644
--- a/src/gpu/batches/GrDefaultPathRenderer.cpp
+++ b/src/gpu/batches/GrDefaultPathRenderer.cpp
@@ -96,7 +96,7 @@
 
 class DefaultPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     DefaultPathBatch(GrColor color, const SkPath& path, SkScalar tolerance,
                      uint8_t coverage, const SkMatrix& viewMatrix, bool isHairline,
@@ -268,7 +268,7 @@
         target->putBackVertices((size_t)(maxVertices - vertexOffset), (size_t)vertexStride);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         DefaultPathBatch* that = t->cast<DefaultPathBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                      that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrDiscardBatch.h b/src/gpu/batches/GrDiscardBatch.h
index d2ebb46..a53feff 100644
--- a/src/gpu/batches/GrDiscardBatch.h
+++ b/src/gpu/batches/GrDiscardBatch.h
@@ -8,14 +8,14 @@
 #ifndef GrDiscardBatch_DEFINED
 #define GrDiscardBatch_DEFINED
 
-#include "GrBatch.h"
 #include "GrBatchFlushState.h"
 #include "GrGpu.h"
+#include "GrOp.h"
 #include "GrRenderTarget.h"
 
-class GrDiscardBatch final : public GrBatch {
+class GrDiscardBatch final : public GrOp {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     GrDiscardBatch(GrRenderTarget* rt)
         : INHERITED(ClassID())
@@ -39,7 +39,7 @@
     }
 
 private:
-    bool onCombineIfPossible(GrBatch* that, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* that, const GrCaps& caps) override {
         return this->renderTargetUniqueID() == that->renderTargetUniqueID();
     }
 
@@ -51,7 +51,7 @@
 
     GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrDrawAtlasBatch.cpp b/src/gpu/batches/GrDrawAtlasBatch.cpp
index 6f1bfed..95c8f2c 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.cpp
+++ b/src/gpu/batches/GrDrawAtlasBatch.cpp
@@ -162,7 +162,7 @@
     this->setTransformedBounds(bounds, viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
 }
 
-bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrDrawAtlasBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
     GrDrawAtlasBatch* that = t->cast<GrDrawAtlasBatch>();
 
     if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrDrawAtlasBatch.h b/src/gpu/batches/GrDrawAtlasBatch.h
index f8d88e0..449882c 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.h
+++ b/src/gpu/batches/GrDrawAtlasBatch.h
@@ -14,7 +14,7 @@
 
 class GrDrawAtlasBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     GrDrawAtlasBatch(GrColor color, const SkMatrix& viewMatrix, int spriteCount,
                      const SkRSXform* xforms, const SkRect* rects, const SkColor* colors);
@@ -55,7 +55,7 @@
     int quadCount() const { return fQuadCount; }
     bool coverageIgnored() const { return fCoverageIgnored; }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+    bool onCombineIfPossible(GrOp* t, const GrCaps&) override;
 
     struct Geometry {
         GrColor                 fColor;
diff --git a/src/gpu/batches/GrDrawBatch.h b/src/gpu/batches/GrDrawBatch.h
index c92dca3..2184552 100644
--- a/src/gpu/batches/GrDrawBatch.h
+++ b/src/gpu/batches/GrDrawBatch.h
@@ -9,7 +9,7 @@
 #define GrDrawBatch_DEFINED
 
 #include <functional>
-#include "GrBatch.h"
+#include "GrOp.h"
 #include "GrPipeline.h"
 
 struct GrInitInvariantOutput;
@@ -44,7 +44,7 @@
 /**
  * Base class for GrBatches that draw. These batches have a GrPipeline installed by GrOpList.
  */
-class GrDrawBatch : public GrBatch {
+class GrDrawBatch : public GrOp {
 public:
     /** Method that performs an upload on behalf of a DeferredUploadFn. */
     using WritePixelsFn = std::function<bool(GrSurface* texture,
@@ -136,7 +136,7 @@
 private:
     SkAlignedSTStorage<1, GrPipeline>               fPipelineStorage;
     bool                                            fPipelineInstalled;
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrDrawPathBatch.cpp b/src/gpu/batches/GrDrawPathBatch.cpp
index fb458ec..8d0bc42 100644
--- a/src/gpu/batches/GrDrawPathBatch.cpp
+++ b/src/gpu/batches/GrDrawPathBatch.cpp
@@ -62,7 +62,7 @@
     this->setBounds(bounds, HasAABloat::kNo, IsZeroArea::kNo);
 }
 
-bool GrDrawPathRangeBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrDrawPathRangeBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
     GrDrawPathRangeBatch* that = t->cast<GrDrawPathRangeBatch>();
     if (this->fPathRange.get() != that->fPathRange.get() ||
         this->transformType() != that->transformType() ||
diff --git a/src/gpu/batches/GrDrawPathBatch.h b/src/gpu/batches/GrDrawPathBatch.h
index 3a46f46..9de29e2 100644
--- a/src/gpu/batches/GrDrawPathBatch.h
+++ b/src/gpu/batches/GrDrawPathBatch.h
@@ -63,7 +63,7 @@
 
 class GrDrawPathBatch final : public GrDrawPathBatchBase {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static GrDrawBatch* Create(const SkMatrix& viewMatrix, GrColor color, const GrPath* path) {
         return new GrDrawPathBatch(viewMatrix, color, path);
@@ -80,7 +80,7 @@
         this->setTransformedBounds(path->getBounds(), viewMatrix, HasAABloat::kNo, IsZeroArea::kNo);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override { return false; }
 
     void onDraw(GrBatchFlushState* state, const SkRect& bounds) override;
 
@@ -94,7 +94,7 @@
 public:
     typedef GrPathRendering::PathTransformType TransformType;
 
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     struct InstanceData : public SkNoncopyable {
     public:
@@ -173,7 +173,7 @@
 
     TransformType transformType() const { return fDraws.head()->fInstanceData->transformType(); }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override;
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override;
 
     void onDraw(GrBatchFlushState* state, const SkRect& bounds) override;
 
diff --git a/src/gpu/batches/GrDrawVerticesBatch.cpp b/src/gpu/batches/GrDrawVerticesBatch.cpp
index e565022..1d02320 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.cpp
+++ b/src/gpu/batches/GrDrawVerticesBatch.cpp
@@ -171,7 +171,7 @@
     target->draw(gp.get(), mesh);
 }
 
-bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+bool GrDrawVerticesBatch::onCombineIfPossible(GrOp* t, const GrCaps& caps) {
     GrDrawVerticesBatch* that = t->cast<GrDrawVerticesBatch>();
 
     if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
diff --git a/src/gpu/batches/GrDrawVerticesBatch.h b/src/gpu/batches/GrDrawVerticesBatch.h
index d71fb0d..821321a 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.h
+++ b/src/gpu/batches/GrDrawVerticesBatch.h
@@ -20,7 +20,7 @@
 
 class GrDrawVerticesBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
 
     GrDrawVerticesBatch(GrColor color, GrPrimitiveType primitiveType,
@@ -55,7 +55,7 @@
                kPoints_GrPrimitiveType == fPrimitiveType;
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
+    bool onCombineIfPossible(GrOp* t, const GrCaps&) override;
 
     struct Mesh {
         GrColor fColor; // Only used if there are no per-vertex colors
diff --git a/src/gpu/batches/GrMSAAPathRenderer.cpp b/src/gpu/batches/GrMSAAPathRenderer.cpp
index ab98a15..77d4e6b 100644
--- a/src/gpu/batches/GrMSAAPathRenderer.cpp
+++ b/src/gpu/batches/GrMSAAPathRenderer.cpp
@@ -216,7 +216,7 @@
 
 class MSAAPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     MSAAPathBatch(GrColor color, const SkPath& path, const SkMatrix& viewMatrix,
                   const SkRect& devBounds)
@@ -447,7 +447,7 @@
         }
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         MSAAPathBatch* that = t->cast<MSAAPathBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                      that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNinePatch.cpp b/src/gpu/batches/GrNinePatch.cpp
index 2e1809d..7ca8d68 100644
--- a/src/gpu/batches/GrNinePatch.cpp
+++ b/src/gpu/batches/GrNinePatch.cpp
@@ -25,7 +25,7 @@
 
 class GrNonAANinePatchBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static const int kVertsPerRect = 4;
     static const int kIndicesPerRect = 6;
@@ -143,7 +143,7 @@
         fOverrides = overrides;
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         GrNonAANinePatchBatch* that = t->cast<GrNonAANinePatchBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNonAAFillRectBatch.cpp b/src/gpu/batches/GrNonAAFillRectBatch.cpp
index 0e66134..8f45a8f 100644
--- a/src/gpu/batches/GrNonAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAFillRectBatch.cpp
@@ -73,7 +73,7 @@
 
 class NonAAFillRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     NonAAFillRectBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
                        const SkRect* localRect, const SkMatrix* localMatrix)
@@ -159,7 +159,7 @@
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         NonAAFillRectBatch* that = t->cast<NonAAFillRectBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp b/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
index 9a946c8..2dcd3e2 100644
--- a/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
+++ b/src/gpu/batches/GrNonAAFillRectPerspectiveBatch.cpp
@@ -92,7 +92,7 @@
 // We handle perspective in the local matrix or viewmatrix with special batches
 class GrNonAAFillRectPerspectiveBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     GrNonAAFillRectPerspectiveBatch(GrColor color, const SkMatrix& viewMatrix, const SkRect& rect,
                                     const SkRect* localRect, const SkMatrix* localMatrix)
@@ -187,7 +187,7 @@
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         GrNonAAFillRectPerspectiveBatch* that = t->cast<GrNonAAFillRectPerspectiveBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
index 9139dab..2cc1a7d 100644
--- a/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrNonAAStrokeRectBatch.cpp
@@ -47,7 +47,7 @@
 
 class NonAAStrokeRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     const char* name() const override { return "NonAAStrokeRectBatch"; }
 
@@ -167,7 +167,7 @@
         fOverrides = overrides;
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps&) override {
         // NonAA stroke rects cannot batch right now
         // TODO make these batchable
         return false;
diff --git a/src/gpu/batches/GrBatch.cpp b/src/gpu/batches/GrOp.cpp
similarity index 66%
rename from src/gpu/batches/GrBatch.cpp
rename to src/gpu/batches/GrOp.cpp
index 6755cf9..1d86419 100644
--- a/src/gpu/batches/GrBatch.cpp
+++ b/src/gpu/batches/GrOp.cpp
@@ -5,14 +5,14 @@
  * found in the LICENSE file.
  */
 
-#include "GrBatch.h"
+#include "GrOp.h"
 
 #include "GrMemoryPool.h"
 #include "SkSpinlock.h"
 
-// TODO I noticed a small benefit to using a larger exclusive pool for batches.  Its very small,
-// but seems to be mostly consistent.  There is a lot in flux right now, but we should really
-// revisit this when batch is everywhere
+// TODO I noticed a small benefit to using a larger exclusive pool for ops. It's very small, but
+// seems to be mostly consistent.  There is a lot in flux right now, but we should really revisit
+// this.
 
 
 // We use a global pool protected by a mutex(spinlock). Chrome may use the same GrContext on
@@ -20,7 +20,7 @@
 // memory barrier between accesses of a context on different threads. Also, there may be multiple
 // GrContexts and those contexts may be in use concurrently on different threads.
 namespace {
-static SkSpinlock gBatchSpinlock;
+static SkSpinlock gOpPoolSpinLock;
 class MemoryPoolAccessor {
 public:
 
@@ -29,8 +29,8 @@
     MemoryPoolAccessor() {}
     ~MemoryPoolAccessor() {}
 #else
-    MemoryPoolAccessor() { gBatchSpinlock.acquire(); }
-    ~MemoryPoolAccessor() { gBatchSpinlock.release(); }
+    MemoryPoolAccessor() { gOpPoolSpinLock.acquire(); }
+    ~MemoryPoolAccessor() { gOpPoolSpinLock.release(); }
 #endif
 
     GrMemoryPool* pool() const {
@@ -40,24 +40,24 @@
 };
 }
 
-int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchID;
+int32_t GrOp::gCurrOpClassID = GrOp::kIllegalOpID;
 
-int32_t GrBatch::gCurrBatchUniqueID = GrBatch::kIllegalBatchID;
+int32_t GrOp::gCurrOpUniqueID = GrOp::kIllegalOpID;
 
-void* GrBatch::operator new(size_t size) {
+void* GrOp::operator new(size_t size) {
     return MemoryPoolAccessor().pool()->allocate(size);
 }
 
-void GrBatch::operator delete(void* target) {
+void GrOp::operator delete(void* target) {
     return MemoryPoolAccessor().pool()->release(target);
 }
 
-GrBatch::GrBatch(uint32_t classID)
+GrOp::GrOp(uint32_t classID)
     : fClassID(classID)
-    , fUniqueID(kIllegalBatchID) {
+    , fUniqueID(kIllegalOpID) {
     SkASSERT(classID == SkToU32(fClassID));
     SkDEBUGCODE(fUsed = false;)
     SkDEBUGCODE(fBoundsFlags = kUninitialized_BoundsFlag);
 }
 
-GrBatch::~GrBatch() {}
+GrOp::~GrOp() {}
diff --git a/src/gpu/batches/GrBatch.h b/src/gpu/batches/GrOp.h
similarity index 66%
rename from src/gpu/batches/GrBatch.h
rename to src/gpu/batches/GrOp.h
index e4065ec..79fbfa4 100644
--- a/src/gpu/batches/GrBatch.h
+++ b/src/gpu/batches/GrOp.h
@@ -22,48 +22,47 @@
 class GrBatchFlushState;
 
 /**
- * GrBatch is the base class for all Ganesh deferred geometry generators.  To facilitate
- * reorderable batching, Ganesh does not generate geometry inline with draw calls.  Instead, it
- * captures the arguments to the draw and then generates the geometry on demand.  This gives GrBatch
- * subclasses complete freedom to decide how / what they can batch.
+ * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reorderable
+ * batching, Ganesh does not generate geometry inline with draw calls. Instead, it captures the
+ * arguments to the draw and then generates the geometry on demand. This gives GrOp subclasses
+ * complete freedom to decide how/what they can batch.
  *
- * Batches are created when GrContext processes a draw call. Batches of the same  subclass may be
- * merged using combineIfPossible. When two batches merge, one takes on the union of the data
- * and the other is left empty. The merged batch becomes responsible for drawing the data from both
- * the original batches.
+ * Ops of the same subclass may be merged using combineIfPossible. When two ops merge, one
+ * takes on the union of the data and the other is left empty. The merged op becomes responsible
+ * for drawing the data from both the original ops.
  *
  * If there are any possible optimizations which might require knowing more about the full state of
- * the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this
- * information will be communicated to the GrBatch prior to geometry generation.
+ * the draw, e.g. whether or not the GrOp is allowed to tweak alpha for coverage, then this
+ * information will be communicated to the GrOp prior to geometry generation.
  *
- * The bounds of the batch must contain all the vertices in device space *irrespective* of the clip.
+ * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
  * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
  * in turn depend upon the clip.
  */
-#define GR_BATCH_SPEW 0
-#if GR_BATCH_SPEW
-    #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
-    #define GrBATCH_SPEW(code) code
+#define GR_OP_SPEW 0
+#if GR_OP_SPEW
+    #define GrOP_SPEW(code) code
+    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
 #else
-    #define GrBATCH_SPEW(code)
-    #define GrBATCH_INFO(...)
+    #define GrOP_SPEW(code)
+    #define GrOP_INFO(...)
 #endif
 
 // A helper macro to generate a class static id
-#define DEFINE_BATCH_CLASS_ID \
+#define DEFINE_OP_CLASS_ID \
     static uint32_t ClassID() { \
-        static uint32_t kClassID = GenBatchClassID(); \
+        static uint32_t kClassID = GenOpClassID(); \
         return kClassID; \
     }
 
-class GrBatch : public GrNonAtomicRef<GrBatch> {
+class GrOp : public GrNonAtomicRef<GrOp> {
 public:
-    GrBatch(uint32_t classID);
-    virtual ~GrBatch();
+    GrOp(uint32_t classID);
+    virtual ~GrOp();
 
     virtual const char* name() const = 0;
 
-    bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
+    bool combineIfPossible(GrOp* that, const GrCaps& caps) {
         if (this->classID() != that->classID()) {
             return false;
         }
@@ -97,7 +96,7 @@
     }
 
     /**
-     * Helper for safely down-casting to a GrBatch subclass
+     * Helper for safely down-casting to a GrOp subclass
      */
     template <typename T> const T& cast() const {
         SkASSERT(T::ClassID() == this->classID());
@@ -109,40 +108,40 @@
         return static_cast<T*>(this);
     }
 
-    uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }
+    uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }
 
     // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
     uint32_t uniqueID() const {
-        if (kIllegalBatchID == fUniqueID) {
-            fUniqueID = GenBatchID();
+        if (kIllegalOpID == fUniqueID) {
+            fUniqueID = GenOpID();
         }
         return fUniqueID;
     }
     SkDEBUGCODE(bool isUsed() const { return fUsed; })
 
-    /** Called prior to drawing. The batch should perform any resource creation necessary to
+    /** Called prior to drawing. The op should perform any resource creation necessary
         to quickly issue its draw when draw is called. */
     void prepare(GrBatchFlushState* state) { this->onPrepare(state); }
 
-    /** Issues the batches commands to GrGpu. */
+    /** Issues the op's commands to GrGpu. */
     void draw(GrBatchFlushState* state, const SkRect& bounds) { this->onDraw(state, bounds); }
 
     /** Used to block batching across render target changes. Remove this once we store
-        GrBatches for different RTs in different targets. */
+        GrOps for different RTs in different targets. */
     // TODO: this needs to be updated to return GrSurfaceProxy::UniqueID
     virtual GrGpuResource::UniqueID renderTargetUniqueID() const = 0;
 
-    /** Used for spewing information about batches when debugging. */
+    /** Used for spewing information about ops when debugging. */
     virtual SkString dumpInfo() const {
         SkString string;
-        string.appendf("BatchBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
+        string.appendf("OpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n",
                        fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom);
         return string;
     }
 
 protected:
     /**
-     * Indicates that the batch will produce geometry that extends beyond its bounds for the
+     * Indicates that the op will produce geometry that extends beyond its bounds for the
      * purpose of ensuring that the fragment shader runs on partially covered pixels for
      * non-MSAA antialiasing.
      */
@@ -151,8 +150,8 @@
         kNo
     };
     /**
-     * Indicates that the geometry represented by the batch has zero area (i.e. it is hairline
-     * or points).
+     * Indicates that the geometry represented by the op has zero area (e.g. it is hairline or
+     * points).
      */
     enum class IsZeroArea {
         kYes,
@@ -168,7 +167,7 @@
         this->setBoundsFlags(aabloat, zeroArea);
     }
 
-    void joinBounds(const GrBatch& that) {
+    void joinBounds(const GrOp& that) {
         if (that.hasAABloat()) {
             fBoundsFlags |= kAABloat_BoundsFlag;
         }
@@ -178,15 +177,15 @@
         return fBounds.joinPossiblyEmptyRect(that.fBounds);
     }
 
-    void replaceBounds(const GrBatch& that) {
+    void replaceBounds(const GrOp& that) {
         fBounds = that.fBounds;
         fBoundsFlags = that.fBoundsFlags;
     }
 
-    static uint32_t GenBatchClassID() { return GenID(&gCurrBatchClassID); }
+    static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }
 
 private:
-    virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
+    virtual bool onCombineIfPossible(GrOp*, const GrCaps& caps) = 0;
 
     virtual void onPrepare(GrBatchFlushState*) = 0;
     virtual void onDraw(GrBatchFlushState*, const SkRect& bounds) = 0;
@@ -196,7 +195,7 @@
         // 1 to the returned value.
         uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
         if (!id) {
-            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
+            SkFAIL("This should never wrap as it should only be called once for each GrOp "
                    "subclass.");
         }
         return id;
@@ -209,7 +208,7 @@
     }
 
     enum {
-        kIllegalBatchID = 0,
+        kIllegalOpID = 0,
     };
 
     enum BoundsFlags {
@@ -222,12 +221,12 @@
     const uint16_t                      fClassID;
     uint16_t                            fBoundsFlags;
 
-    static uint32_t GenBatchID() { return GenID(&gCurrBatchUniqueID); }
+    static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
     mutable uint32_t                    fUniqueID;
     SkRect                              fBounds;
 
-    static int32_t                      gCurrBatchUniqueID;
-    static int32_t                      gCurrBatchClassID;
+    static int32_t                      gCurrOpUniqueID;
+    static int32_t                      gCurrOpClassID;
 };
 
 #endif
diff --git a/src/gpu/batches/GrPLSPathRenderer.cpp b/src/gpu/batches/GrPLSPathRenderer.cpp
index f31c323..924e2a8 100644
--- a/src/gpu/batches/GrPLSPathRenderer.cpp
+++ b/src/gpu/batches/GrPLSPathRenderer.cpp
@@ -765,7 +765,7 @@
 
 class PLSPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
     PLSPathBatch(GrColor color, const SkPath& path, const SkMatrix& viewMatrix)
             : INHERITED(ClassID())
             , fColor(color)
@@ -915,7 +915,7 @@
     }
 
 private:
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         return false;
     }
 
diff --git a/src/gpu/batches/GrRectBatchFactory.h b/src/gpu/batches/GrRectBatchFactory.h
index c9b6843..5ae1934 100644
--- a/src/gpu/batches/GrRectBatchFactory.h
+++ b/src/gpu/batches/GrRectBatchFactory.h
@@ -17,7 +17,7 @@
 #include "GrPaint.h"
 #include "SkMatrix.h"
 
-class GrBatch;
+class GrOp;
 struct SkRect;
 class SkStrokeRec;
 
diff --git a/src/gpu/batches/GrRegionBatch.cpp b/src/gpu/batches/GrRegionBatch.cpp
index ae09a5f..e1eaf28 100644
--- a/src/gpu/batches/GrRegionBatch.cpp
+++ b/src/gpu/batches/GrRegionBatch.cpp
@@ -52,7 +52,7 @@
 
 class RegionBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     RegionBatch(GrColor color, const SkMatrix& viewMatrix, const SkRegion& region)
             : INHERITED(ClassID())
@@ -130,7 +130,7 @@
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         RegionBatch* that = t->cast<RegionBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrShadowRRectBatch.cpp b/src/gpu/batches/GrShadowRRectBatch.cpp
index c4b56b8..6ae6e62 100755
--- a/src/gpu/batches/GrShadowRRectBatch.cpp
+++ b/src/gpu/batches/GrShadowRRectBatch.cpp
@@ -61,7 +61,7 @@
 
 class ShadowCircleBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static GrDrawBatch* Create(GrColor color, const SkMatrix& viewMatrix, SkPoint center,
                                SkScalar radius, SkScalar blurRadius, const GrStyle& style) {
@@ -367,7 +367,7 @@
         target->draw(gp.get(), mesh);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         ShadowCircleBatch* that = t->cast<ShadowCircleBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
@@ -506,7 +506,7 @@
 
 class ShadowCircularRRectBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     // A devStrokeWidth <= 0 indicates a fill only. If devStrokeWidth > 0 then strokeOnly indicates
     // whether the rrect is only stroked or stroked and filled.
@@ -796,7 +796,7 @@
         target->draw(gp.get(), mesh);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         ShadowCircularRRectBatch* that = t->cast<ShadowCircularRRectBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/batches/GrStencilPathBatch.h b/src/gpu/batches/GrStencilPathBatch.h
index b95c75a..293da12 100644
--- a/src/gpu/batches/GrStencilPathBatch.h
+++ b/src/gpu/batches/GrStencilPathBatch.h
@@ -8,18 +8,18 @@
 #ifndef GrStencilPathBatch_DEFINED
 #define GrStencilPathBatch_DEFINED
 
-#include "GrBatch.h"
 #include "GrBatchFlushState.h"
 #include "GrGpu.h"
+#include "GrOp.h"
 #include "GrPath.h"
 #include "GrPathRendering.h"
 #include "GrRenderTarget.h"
 
-class GrStencilPathBatch final : public GrBatch {
+class GrStencilPathBatch final : public GrOp {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
-    static GrBatch* Create(const SkMatrix& viewMatrix,
+    static GrOp* Create(const SkMatrix& viewMatrix,
                            bool useHWAA,
                            GrPathRendering::FillType fillType,
                            bool hasStencilClip,
@@ -64,7 +64,7 @@
         this->setBounds(path->getBounds(), HasAABloat::kNo, IsZeroArea::kNo);
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override { return false; }
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override { return false; }
 
     void onPrepare(GrBatchFlushState*) override {}
 
@@ -81,7 +81,7 @@
     GrPendingIOResource<GrRenderTarget, kWrite_GrIOType>    fRenderTarget;
     GrPendingIOResource<const GrPath, kRead_GrIOType>       fPath;
 
-    typedef GrBatch INHERITED;
+    typedef GrOp INHERITED;
 };
 
 #endif
diff --git a/src/gpu/batches/GrTessellatingPathRenderer.cpp b/src/gpu/batches/GrTessellatingPathRenderer.cpp
index 970af5c..46dd2f7 100644
--- a/src/gpu/batches/GrTessellatingPathRenderer.cpp
+++ b/src/gpu/batches/GrTessellatingPathRenderer.cpp
@@ -162,7 +162,7 @@
 
 class TessellatingPathBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     static GrDrawBatch* Create(const GrColor& color,
                                const GrShape& shape,
@@ -324,7 +324,7 @@
         target->draw(gp, mesh);
     }
 
-    bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
+    bool onCombineIfPossible(GrOp*, const GrCaps&) override { return false; }
 
     TessellatingPathBatch(const GrColor& color,
                           const GrShape& shape,
diff --git a/src/gpu/batches/GrTestBatch.h b/src/gpu/batches/GrTestBatch.h
index 5bac48a..9d76c43 100644
--- a/src/gpu/batches/GrTestBatch.h
+++ b/src/gpu/batches/GrTestBatch.h
@@ -55,9 +55,7 @@
     const Optimizations optimizations() const { return fOptimizations; }
 
 private:
-    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
-        return false;
-    }
+    bool onCombineIfPossible(GrOp*, const GrCaps&) override { return false; }
 
     GrColor       fColor;
     Optimizations fOptimizations;
diff --git a/src/gpu/effects/GrDashingEffect.cpp b/src/gpu/effects/GrDashingEffect.cpp
index d9cb72f..43a28ec 100644
--- a/src/gpu/effects/GrDashingEffect.cpp
+++ b/src/gpu/effects/GrDashingEffect.cpp
@@ -240,7 +240,7 @@
 
 class DashBatch : public GrVertexBatch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
     struct Geometry {
         SkMatrix fViewMatrix;
         SkMatrix fSrcRotInv;
@@ -646,7 +646,7 @@
         helper.recordDraw(target, gp.get());
     }
 
-    bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+    bool onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
         DashBatch* that = t->cast<DashBatch>();
         if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *that->pipeline(),
                                     that->bounds(), caps)) {
diff --git a/src/gpu/instanced/GLInstancedRendering.cpp b/src/gpu/instanced/GLInstancedRendering.cpp
index 49b059d..d680189 100644
--- a/src/gpu/instanced/GLInstancedRendering.cpp
+++ b/src/gpu/instanced/GLInstancedRendering.cpp
@@ -17,7 +17,7 @@
 
 class GLInstancedRendering::GLBatch : public InstancedRendering::Batch {
 public:
-    DEFINE_BATCH_CLASS_ID
+    DEFINE_OP_CLASS_ID
 
     GLBatch(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {}
     int numGLCommands() const { return 1 + fNumChangesInGeometry; }
diff --git a/src/gpu/instanced/InstancedRendering.cpp b/src/gpu/instanced/InstancedRendering.cpp
index 99a749a..a46f24b 100644
--- a/src/gpu/instanced/InstancedRendering.cpp
+++ b/src/gpu/instanced/InstancedRendering.cpp
@@ -382,7 +382,7 @@
     fIsTracked = true;
 }
 
-bool InstancedRendering::Batch::onCombineIfPossible(GrBatch* other, const GrCaps& caps) {
+bool InstancedRendering::Batch::onCombineIfPossible(GrOp* other, const GrCaps& caps) {
     Batch* that = static_cast<Batch*>(other);
     SkASSERT(fInstancedRendering == that->fInstancedRendering);
     SkASSERT(fTailDraw);
diff --git a/src/gpu/instanced/InstancedRendering.h b/src/gpu/instanced/InstancedRendering.h
index faa5471..c94ca8a 100644
--- a/src/gpu/instanced/InstancedRendering.h
+++ b/src/gpu/instanced/InstancedRendering.h
@@ -137,7 +137,7 @@
         Batch(uint32_t classID, InstancedRendering* ir);
 
         void initBatchTracker(const GrXPOverridesForBatch&) override;
-        bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;
+        bool onCombineIfPossible(GrOp* other, const GrCaps& caps) override;
 
         void computePipelineOptimizations(GrInitInvariantOutput* color,
                                           GrInitInvariantOutput* coverage,