GrBatchPrototype

BUG=skia:

Committed: https://skia.googlesource.com/skia/+/d15e4e45374275c045572b304c229237c4a82be4

Committed: https://skia.googlesource.com/skia/+/d5a7db4a867c7e6ccf8451a053d987b470099198

Review URL: https://codereview.chromium.org/845103005
diff --git a/src/gpu/GrInOrderDrawBuffer.cpp b/src/gpu/GrInOrderDrawBuffer.cpp
index 70d61ec..a760dcb 100644
--- a/src/gpu/GrInOrderDrawBuffer.cpp
+++ b/src/gpu/GrInOrderDrawBuffer.cpp
@@ -7,6 +7,7 @@
 
 #include "GrInOrderDrawBuffer.h"
 
+#include "GrBufferAllocPool.h"
 #include "GrDefaultGeoProcFactory.h"
 #include "GrDrawTargetCaps.h"
 #include "GrGpu.h"
@@ -20,7 +21,9 @@
     : INHERITED(gpu, vertexPool, indexPool)
     , fCmdBuffer(kCmdBufferInitialSizeInBytes)
     , fPrevState(NULL)
-    , fDrawID(0) {
+    , fDrawID(0)
+    , fBatchTarget(gpu, vertexPool, indexPool)
+    , fDrawBatch(NULL) {
 
     SkASSERT(vertexPool);
     SkASSERT(indexPool);
@@ -210,6 +213,7 @@
     Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
 
     if (!draw->fInfo.isInstanced() ||
+        draw->fInfo.primitiveType() != info.primitiveType() ||
         draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
         draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
         draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
@@ -247,6 +251,9 @@
                                  const GrDeviceCoordTexture* dstCopy) {
     SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer()));
 
+    // This closeBatch call is required because setting up the clip may introduce new draws.
+    this->closeBatch();
+
     if (!this->recordStateAndShouldDraw(pipelineBuilder, gp, scissorState, dstCopy)) {
         return;
     }
@@ -266,6 +273,30 @@
     this->recordTraceMarkersIfNecessary();
 }
 
+void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch,
+                                      const GrPipelineBuilder& pipelineBuilder,
+                                      const GrScissorState& scissorState,
+                                      const GrDeviceCoordTexture* dstCopy) {
+    if (!this->recordStateAndShouldDraw(batch, pipelineBuilder, scissorState, dstCopy)) {
+        return;
+    }
+
+    // Check if there is a previous DrawBatch we can combine with.
+    if (kDrawBatch_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
+        fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
+        return;
+    }
+
+    DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back());
+    if (draw->fBatch->combineIfPossible(batch)) {
+        return;
+    } else {
+        this->closeBatch();
+        fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
+    }
+    this->recordTraceMarkersIfNecessary();
+}
+
 void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder,
                                         const GrPathProcessor* pathProc,
                                         const GrPath* path,
@@ -286,6 +317,8 @@
                                      const GrScissorState& scissorState,
                                      const GrStencilSettings& stencilSettings,
                                      const GrDeviceCoordTexture* dstCopy) {
+    this->closeBatch();
+
     // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering?
     if (!this->recordStateAndShouldDraw(pipelineBuilder, pathProc, scissorState, dstCopy)) {
         return;
@@ -310,6 +343,8 @@
     SkASSERT(indices);
     SkASSERT(transformValues);
 
+    this->closeBatch();
+
     if (!this->recordStateAndShouldDraw(pipelineBuilder, pathProc, scissorState, dstCopy)) {
         return;
     }
@@ -403,6 +438,7 @@
     reset_data_buffer(&fPathIndexBuffer, kPathIdxBufferMinReserve);
     reset_data_buffer(&fPathTransformBuffer, kPathXformBufferMinReserve);
     fGpuCmdMarkers.reset();
+    fDrawBatch = NULL;
 }
 
 void GrInOrderDrawBuffer::onFlush() {
@@ -410,15 +446,21 @@
         return;
     }
 
-
-    CmdBuffer::Iter iter(fCmdBuffer);
-
-    int currCmdMarker = 0;
-
     // Updated every time we find a set state cmd to reflect the current state in the playback
     // stream.
     SetState* currentState = NULL;
 
+    // TODO: this is temporary while GrBatch is being rolled out.
+    this->closeBatch();
+    this->getVertexAllocPool()->unmap();
+    this->getIndexAllocPool()->unmap();
+    fBatchTarget.preFlush();
+
+    currentState = NULL;
+    CmdBuffer::Iter iter(fCmdBuffer);
+
+    int currCmdMarker = 0;
+
     while (iter.next()) {
         GrGpuTraceMarker newMarker("", -1);
         SkString traceString;
@@ -429,13 +471,25 @@
             ++currCmdMarker;
         }
 
-        if (kSetState_Cmd == strip_trace_bit(iter->fType)) {
+        // TODO temporary hack
+        if (kDrawBatch_Cmd == strip_trace_bit(iter->fType)) {
+            fBatchTarget.flushNext();
+            continue;
+        }
+
+        bool isSetState = kSetState_Cmd == strip_trace_bit(iter->fType);
+        if (isSetState) {
             SetState* ss = reinterpret_cast<SetState*>(iter.get());
 
-            this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, ss->fPipeline,
-                                             ss->fPipeline.descInfo(), ss->fBatchTracker);
+            // TODO: sometimes we have a prim proc, other times we have a GrBatch. Eventually we
+            // will only have GrBatch and can delete this check.
+            if (ss->fPrimitiveProcessor) {
+                this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor,
+                                                 ss->fPipeline,
+                                                 ss->fPipeline.descInfo(),
+                                                 ss->fBatchTracker);
+            }
             currentState = ss;
-
         } else {
             iter->execute(this, currentState);
         }
@@ -445,6 +499,9 @@
         }
     }
 
+    // TODO: see the notes above about this temporary hack.
+    fBatchTarget.postFlush();
+
     SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
     ++fDrawID;
 }
@@ -484,6 +541,11 @@
                             fCount, fStencilSettings);
 }
 
+void GrInOrderDrawBuffer::DrawBatch::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
+    SkASSERT(state);
+    fBatch->generateGeometry(buf->getBatchTarget(), &state->fPipeline);
+}
+
 void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const SetState*) {}
 
 void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const SetState*) {
@@ -531,7 +593,7 @@
     ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
                                               ss->fPipeline.getInitBatchTracker());
 
-    if (fPrevState &&
+    if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
         fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
                                                       *ss->fPrimitiveProcessor,
                                                       ss->fBatchTracker) &&
@@ -544,6 +606,34 @@
     return true;
 }
 
+bool GrInOrderDrawBuffer::recordStateAndShouldDraw(GrBatch* batch,
+                                                   const GrPipelineBuilder& pipelineBuilder,
+                                                   const GrScissorState& scissor,
+                                                   const GrDeviceCoordTexture* dstCopy) {
+    // TODO: this gets much simpler when we have batches everywhere.
+    // If the previous recorded state was also created for a GrBatch (it has no primitive
+    // processor) and its pipeline is equal to the new one, reuse it instead of this state.
+    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState,
+                                            (batch, pipelineBuilder, *this->getGpu()->caps(), scissor,
+                                             dstCopy));
+    if (ss->fPipeline.mustSkip()) {
+        fCmdBuffer.pop_back();
+        return false;
+    }
+
+    batch->initBatchTracker(ss->fPipeline.getInitBatchTracker());
+
+    if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
+        fPrevState->fPipeline.isEqual(ss->fPipeline)) {
+        fCmdBuffer.pop_back();
+    } else {
+        this->closeBatch();
+        fPrevState = ss;
+        this->recordTraceMarkersIfNecessary();
+    }
+    return true;
+}
+
 void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() {
     SkASSERT(!fCmdBuffer.empty());
     SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType));
@@ -553,3 +643,42 @@
         fGpuCmdMarkers.push_back(activeTraceMarkers);
     }
 }
+
+void GrInOrderDrawBuffer::closeBatch() {
+    if (fDrawBatch) {
+        fDrawBatch->execute(this, fPrevState);
+        fDrawBatch = NULL;
+    }
+}
+
+void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount,
+                                                         size_t vertexStride,
+                                                         int indexCount) {
+    this->closeBatch();
+
+    // We use geometryHints() to know whether to flush the draw buffer. We
+    // can't flush if we are inside an unbalanced pushGeometrySource.
+    // Moreover, flushing blows away vertex and index data that was
+    // previously reserved. So if the vertex or index data is pulled from
+    // reserved space and won't be released by this request then we can't
+    // flush.
+    bool insideGeoPush = this->getGeoPoolStateStack().count() > 1;
+
+    bool unreleasedVertexSpace =
+        !vertexCount &&
+        kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;
+
+    bool unreleasedIndexSpace =
+        !indexCount &&
+        kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;
+
+    int vcount = vertexCount;
+    int icount = indexCount;
+
+    if (!insideGeoPush &&
+        !unreleasedVertexSpace &&
+        !unreleasedIndexSpace &&
+        this->geometryHints(vertexStride, &vcount, &icount)) {
+        this->flush();
+    }
+}
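
The coalescing pattern this patch introduces in onDrawBatch()/closeBatch() can be summarized with
the standalone sketch below. It is only an illustration of the idea, not Skia code: MiniBatch and
MiniDrawBuffer are hypothetical stand-ins for GrBatch and GrInOrderDrawBuffer, and real batches
generate geometry through a GrBatchTarget rather than printing.

// Standalone sketch (not part of the patch): a heavily simplified model of how
// onDrawBatch() folds compatible draws together via combineIfPossible() and how
// closeBatch() emits the open batch when no further merging is possible.
// MiniBatch and MiniDrawBuffer are hypothetical names, not Skia types.
#include <cstdio>
#include <memory>
#include <vector>

struct MiniBatch {
    int fColor;      // stand-in for the state that decides compatibility
    int fQuadCount;  // stand-in for the batch's geometry

    // Mirrors the role of GrBatch::combineIfPossible(): merge only when the two
    // batches could be drawn with identical state.
    bool combineIfPossible(const MiniBatch& that) {
        if (fColor != that.fColor) {
            return false;
        }
        fQuadCount += that.fQuadCount;
        return true;
    }
};

class MiniDrawBuffer {
public:
    // Analogous to onDrawBatch(): try to merge into the open batch, otherwise
    // close it and start a new one.
    void drawBatch(const MiniBatch& batch) {
        if (fOpenBatch && fOpenBatch->combineIfPossible(batch)) {
            return;
        }
        this->closeBatch();
        fOpenBatch.reset(new MiniBatch(batch));
    }

    // Analogous to onFlush(): close the last open batch, then play back.
    void flush() {
        this->closeBatch();
        std::printf("flushed %zu batched draw(s)\n", fClosed.size());
        fClosed.clear();
    }

private:
    // Analogous to closeBatch(); the real code calls generateGeometry() on a
    // GrBatchTarget here instead of printing.
    void closeBatch() {
        if (fOpenBatch) {
            std::printf("draw: color=%d quads=%d\n",
                        fOpenBatch->fColor, fOpenBatch->fQuadCount);
            fClosed.push_back(*fOpenBatch);
            fOpenBatch.reset();
        }
    }

    std::unique_ptr<MiniBatch> fOpenBatch;
    std::vector<MiniBatch> fClosed;
};

int main() {
    MiniDrawBuffer buffer;
    buffer.drawBatch({/*color=*/1, /*quads=*/2});
    buffer.drawBatch({1, 3});  // same state: merged into the previous batch
    buffer.drawBatch({2, 1});  // different state: previous batch is closed
    buffer.flush();            // closes the last open batch
    return 0;
}

Compared with the real code, the sketch skips the SetState bookkeeping done in
recordStateAndShouldDraw(): there, a new GrBatch may only be merged when the previous command is a
DrawBatch recorded under an equal pipeline, and closing a batch generates its geometry immediately
while playback of the recorded commands happens later in onFlush().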