Split GrTargetCommands into its own files
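
Move the GrTargetCommands class and its Cmd subclasses (Draw, StencilPath,
DrawPath, DrawPaths, Clear, ClearStencilClip, CopySurface, SetState and
DrawBatch) out of GrInOrderDrawBuffer.{h,cpp} into the new
GrTargetCommands.{h,cpp} files. GrInOrderDrawBuffer.h now picks the class up
via #include "GrTargetCommands.h"; the moved code is otherwise unchanged.
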
Review URL: https://codereview.chromium.org/979493002
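
For orientation, here is a minimal, self-contained C++ sketch of the
record-then-flush pattern the moved GrTargetCommands class implements. The
types and names below are simplified stand-ins, not the Skia API (the real
code dispatches on a type tag and stores commands in a GrTRecorder rather
than using dynamic_cast and std::vector): record* calls only append Cmd
objects, and flush() replays them while tracking the most recently recorded
SetState.

#include <cstdio>
#include <memory>
#include <vector>

struct Gpu;       // stand-in for GrGpu
struct SetState;  // forward declaration; defined below

// Base class for recorded commands; each command replays itself at flush time.
struct Cmd {
    virtual ~Cmd() {}
    virtual void execute(Gpu* gpu, const SetState* state) = 0;
};

// Stand-in for GrTargetCommands::SetState: carries the state a following
// draw should use. It does nothing when executed directly.
struct SetState : public Cmd {
    explicit SetState(int id) : fStateId(id) {}
    void execute(Gpu*, const SetState*) override {}
    int fStateId;
};

struct Gpu {
    void draw(int stateId, int vertexCount) {
        std::printf("draw %d vertices with state %d\n", vertexCount, stateId);
    }
};

// Stand-in for GrTargetCommands::Draw: replays against the current state.
struct Draw : public Cmd {
    explicit Draw(int count) : fVertexCount(count) {}
    void execute(Gpu* gpu, const SetState* state) override {
        gpu->draw(state ? state->fStateId : -1, fVertexCount);
    }
    int fVertexCount;
};

class TargetCommands {
public:
    // record* methods only append; nothing touches the GPU until flush().
    void recordSetState(int id) { fCmds.push_back(std::make_unique<SetState>(id)); }
    void recordDraw(int count)  { fCmds.push_back(std::make_unique<Draw>(count)); }

    // Replay the buffer, remembering the latest SetState for later commands.
    void flush(Gpu* gpu) {
        const SetState* current = nullptr;
        for (const auto& cmd : fCmds) {
            if (const auto* ss = dynamic_cast<const SetState*>(cmd.get())) {
                current = ss;
            } else {
                cmd->execute(gpu, current);
            }
        }
        fCmds.clear();
    }

private:
    std::vector<std::unique_ptr<Cmd>> fCmds;
};

int main() {
    Gpu gpu;
    TargetCommands commands;
    commands.recordSetState(1);
    commands.recordDraw(6);
    commands.recordDraw(3);
    commands.flush(&gpu);  // prints two draws, both using state 1
    return 0;
}

In the real files the recording side lives in the GrTargetCommands::record*()
methods and the replay loop is GrTargetCommands::flush(), both moved verbatim
by this change.
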
diff --git a/src/gpu/GrInOrderDrawBuffer.cpp b/src/gpu/GrInOrderDrawBuffer.cpp
index 3a17fa9..24db2e0 100644
--- a/src/gpu/GrInOrderDrawBuffer.cpp
+++ b/src/gpu/GrInOrderDrawBuffer.cpp
@@ -7,13 +7,8 @@
#include "GrInOrderDrawBuffer.h"
-#include "GrBufferAllocPool.h"
#include "GrDefaultGeoProcFactory.h"
-#include "GrDrawTargetCaps.h"
-#include "GrGpu.h"
#include "GrTemplates.h"
-#include "GrFontCache.h"
-#include "GrTexture.h"
GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
GrVertexBufferAllocPool* vertexPool,
@@ -32,15 +27,6 @@
this->reset();
}
-void GrTargetCommands::closeBatch() {
- if (fDrawBatch) {
- fBatchTarget.resetNumberOfDraws();
- fDrawBatch->execute(NULL, fPrevState);
- fDrawBatch->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
- fDrawBatch = NULL;
- }
-}
-
////////////////////////////////////////////////////////////////////////////////
/** We always use per-vertex colors so that rects can be batched across color changes. Sometimes we
@@ -68,19 +54,6 @@
}
}
-static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettings) {
- static const GrStencilSettings::Face pathFace = GrStencilSettings::kFront_Face;
- bool isWinding = kInvert_StencilOp != pathStencilSettings.passOp(pathFace);
- if (isWinding) {
- // Double check that it is in fact winding.
- SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace));
- SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace));
- SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace));
- SkASSERT(!pathStencilSettings.isTwoSided());
- }
- return isWinding;
-}
-
class RectBatch : public GrBatch {
public:
struct Geometry {
@@ -319,48 +292,6 @@
this->drawBatch(pipelineBuilder, batch, &bounds);
}
-int GrTargetCommands::concatInstancedDraw(GrInOrderDrawBuffer* iodb,
- const GrDrawTarget::DrawInfo& info) {
- SkASSERT(!fCmdBuffer.empty());
- SkASSERT(info.isInstanced());
-
- const GrIndexBuffer* ib;
- if (!iodb->canConcatToIndexBuffer(&ib)) {
- return 0;
- }
-
- // Check if there is a draw info that is compatible that uses the same VB from the pool and
- // the same IB
- if (Cmd::kDraw_Cmd != fCmdBuffer.back().type()) {
- return 0;
- }
-
- Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
-
- if (!draw->fInfo.isInstanced() ||
- draw->fInfo.primitiveType() != info.primitiveType() ||
- draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
- draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
- draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
- draw->fInfo.indexBuffer() != ib) {
- return 0;
- }
- if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) {
- return 0;
- }
-
- // how many instances can be concat'ed onto draw given the size of the index buffer
- int instancesToConcat = iodb->indexCountInCurrentSource() / info.indicesPerInstance();
- instancesToConcat -= draw->fInfo.instanceCount();
- instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());
-
- draw->fInfo.adjustInstanceCount(instancesToConcat);
-
- // update last fGpuCmdMarkers to include any additional trace markers that have been added
- iodb->recordTraceMarkersIfNecessary(draw);
- return instancesToConcat;
-}
-
void GrInOrderDrawBuffer::onDraw(const GrGeometryProcessor* gp,
const DrawInfo& info,
const PipelineInfo& pipelineInfo) {
@@ -368,63 +299,12 @@
this->recordTraceMarkersIfNecessary(cmd);
}
-GrTargetCommands::Cmd* GrTargetCommands::recordDraw(
- GrInOrderDrawBuffer* iodb,
- const GrGeometryProcessor* gp,
- const GrDrawTarget::DrawInfo& info,
- const GrDrawTarget::PipelineInfo& pipelineInfo) {
- SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer()));
- this->closeBatch();
-
- if (!this->setupPipelineAndShouldDraw(iodb, gp, pipelineInfo)) {
- return NULL;
- }
-
- Draw* draw;
- if (info.isInstanced()) {
- int instancesConcated = this->concatInstancedDraw(iodb, info);
- if (info.instanceCount() > instancesConcated) {
- draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
- draw->fInfo.adjustInstanceCount(-instancesConcated);
- } else {
- return NULL;
- }
- } else {
- draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
- }
-
- return draw;
-}
-
void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch,
const PipelineInfo& pipelineInfo) {
GrTargetCommands::Cmd* cmd = fCommands.recordDrawBatch(this, batch, pipelineInfo);
this->recordTraceMarkersIfNecessary(cmd);
}
-GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(
- GrInOrderDrawBuffer* iodb,
- GrBatch* batch,
- const GrDrawTarget::PipelineInfo& pipelineInfo) {
- if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) {
- return NULL;
- }
-
- // Check if there is a Batch Draw we can batch with
- if (Cmd::kDrawBatch_Cmd != fCmdBuffer.back().type() || !fDrawBatch) {
- fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
- return fDrawBatch;
- }
-
- SkASSERT(&fCmdBuffer.back() == fDrawBatch);
- if (!fDrawBatch->fBatch->combineIfPossible(batch)) {
- this->closeBatch();
- fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
- }
-
- return fDrawBatch;
-}
-
void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder,
const GrPathProcessor* pathProc,
const GrPath* path,
@@ -436,25 +316,6 @@
this->recordTraceMarkersIfNecessary(cmd);
}
-GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath(
- GrInOrderDrawBuffer* iodb,
- const GrPipelineBuilder& pipelineBuilder,
- const GrPathProcessor* pathProc,
- const GrPath* path,
- const GrScissorState& scissorState,
- const GrStencilSettings& stencilSettings) {
- this->closeBatch();
-
- StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath,
- (path, pipelineBuilder.getRenderTarget()));
-
- sp->fScissor = scissorState;
- sp->fUseHWAA = pipelineBuilder.isHWAntialias();
- sp->fViewMatrix = pathProc->viewMatrix();
- sp->fStencil = stencilSettings;
- return sp;
-}
-
void GrInOrderDrawBuffer::onDrawPath(const GrPathProcessor* pathProc,
const GrPath* path,
const GrStencilSettings& stencilSettings,
@@ -465,23 +326,6 @@
this->recordTraceMarkersIfNecessary(cmd);
}
-GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath(
- GrInOrderDrawBuffer* iodb,
- const GrPathProcessor* pathProc,
- const GrPath* path,
- const GrStencilSettings& stencilSettings,
- const GrDrawTarget::PipelineInfo& pipelineInfo) {
- this->closeBatch();
-
- // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering?
- if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
- return NULL;
- }
- DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
- dp->fStencilSettings = stencilSettings;
- return dp;
-}
-
void GrInOrderDrawBuffer::onDrawPaths(const GrPathProcessor* pathProc,
const GrPathRange* pathRange,
const void* indices,
@@ -498,70 +342,6 @@
this->recordTraceMarkersIfNecessary(cmd);
}
-GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
- GrInOrderDrawBuffer* iodb,
- const GrPathProcessor* pathProc,
- const GrPathRange* pathRange,
- const void* indexValues,
- GrDrawTarget::PathIndexType indexType,
- const float transformValues[],
- GrDrawTarget::PathTransformType transformType,
- int count,
- const GrStencilSettings& stencilSettings,
- const GrDrawTarget::PipelineInfo& pipelineInfo) {
- SkASSERT(pathRange);
- SkASSERT(indexValues);
- SkASSERT(transformValues);
- this->closeBatch();
-
- if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
- return NULL;
- }
-
- char* savedIndices;
- float* savedTransforms;
-
- iodb->appendIndicesAndTransforms(indexValues, indexType,
- transformValues, transformType,
- count, &savedIndices, &savedTransforms);
-
- if (Cmd::kDrawPaths_Cmd == fCmdBuffer.back().type()) {
- // The previous command was also DrawPaths. Try to collapse this call into the one
- // before. Note that stenciling all the paths at once, then covering, may not be
- // equivalent to two separate draw calls if there is overlap. Blending won't work,
- // and the combined calls may also cancel each other's winding numbers in some
- // places. For now the winding numbers are only an issue if the fill is even/odd,
- // because DrawPaths is currently only used for glyphs, and glyphs in the same
- // font tend to all wind in the same direction.
- DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back());
- if (pathRange == previous->pathRange() &&
- indexType == previous->fIndexType &&
- transformType == previous->fTransformType &&
- stencilSettings == previous->fStencilSettings &&
- path_fill_type_is_winding(stencilSettings) &&
- !pipelineInfo.willBlendWithDst(pathProc)) {
- const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
- const int xformSize = GrPathRendering::PathTransformSize(transformType);
- if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
- (0 == xformSize ||
- &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
- // Fold this DrawPaths call into the one previous.
- previous->fCount += count;
- return NULL;
- }
- }
- }
-
- DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange));
- dp->fIndices = savedIndices;
- dp->fIndexType = indexType;
- dp->fTransforms = savedTransforms;
- dp->fTransformType = transformType;
- dp->fCount = count;
- dp->fStencilSettings = stencilSettings;
- return dp;
-}
-
void GrInOrderDrawBuffer::onClear(const SkIRect* rect, GrColor color,
bool canIgnoreRect, GrRenderTarget* renderTarget) {
GrTargetCommands::Cmd* cmd = fCommands.recordClear(this, rect, color,
@@ -569,30 +349,6 @@
this->recordTraceMarkersIfNecessary(cmd);
}
-GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb,
- const SkIRect* rect,
- GrColor color,
- bool canIgnoreRect,
- GrRenderTarget* renderTarget) {
- SkASSERT(renderTarget);
- this->closeBatch();
-
- SkIRect r;
- if (NULL == rect) {
- // We could do something smart and remove previous draws and clears to
- // the current render target. If we get that smart we have to make sure
- // those draws aren't read before this clear (render-to-texture).
- r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
- rect = &r;
- }
- Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
- GrColorIsPMAssert(color);
- clr->fColor = color;
- clr->fRect = *rect;
- clr->fCanIgnoreRect = canIgnoreRect;
- return clr;
-}
-
void GrInOrderDrawBuffer::clearStencilClip(const SkIRect& rect,
bool insideClip,
GrRenderTarget* renderTarget) {
@@ -601,19 +357,6 @@
this->recordTraceMarkersIfNecessary(cmd);
}
-GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuffer* iodb,
- const SkIRect& rect,
- bool insideClip,
- GrRenderTarget* renderTarget) {
- SkASSERT(renderTarget);
- this->closeBatch();
-
- ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget));
- clr->fRect = rect;
- clr->fInsideClip = insideClip;
- return clr;
-}
-
void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
if (!this->caps()->discardRenderTargetSupport()) {
return;
@@ -623,16 +366,6 @@
this->recordTraceMarkersIfNecessary(cmd);
}
-GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb,
- GrRenderTarget* renderTarget) {
- SkASSERT(renderTarget);
- this->closeBatch();
-
- Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
- clr->fColor = GrColor_ILLEGAL;
- return clr;
-}
-
void GrInOrderDrawBuffer::onReset() {
fCommands.reset();
fPathIndexBuffer.rewind();
@@ -640,140 +373,11 @@
fGpuCmdMarkers.reset();
}
-void GrTargetCommands::reset() {
- fCmdBuffer.reset();
- fPrevState = NULL;
- fDrawBatch = NULL;
-}
-
void GrInOrderDrawBuffer::onFlush() {
fCommands.flush(this);
++fDrawID;
}
-void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
- if (fCmdBuffer.empty()) {
- return;
- }
-
- // Updated every time we find a set state cmd to reflect the current state in the playback
- // stream.
- SetState* currentState = NULL;
-
- // TODO this is temporary while batch is being rolled out
- this->closeBatch();
- iodb->getVertexAllocPool()->unmap();
- iodb->getIndexAllocPool()->unmap();
- fBatchTarget.preFlush();
-
- currentState = NULL;
- CmdBuffer::Iter iter(fCmdBuffer);
-
- int currCmdMarker = 0;
-
- GrGpu* gpu = iodb->getGpu();
-
- int i = 0;
- while (iter.next()) {
- i++;
- GrGpuTraceMarker newMarker("", -1);
- SkString traceString;
- if (iter->isTraced()) {
- traceString = iodb->getCmdString(currCmdMarker);
- newMarker.fMarker = traceString.c_str();
- gpu->addGpuTraceMarker(&newMarker);
- ++currCmdMarker;
- }
-
- // TODO temporary hack
- if (Cmd::kDrawBatch_Cmd == iter->type()) {
- DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get());
- fBatchTarget.flushNext(db->fBatch->numberOfDraws());
- continue;
- }
-
- if (Cmd::kSetState_Cmd == iter->type()) {
- SetState* ss = reinterpret_cast<SetState*>(iter.get());
-
- // TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we
- // will only have GrBatch and we can delete this
- if (ss->fPrimitiveProcessor) {
- gpu->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor,
- *ss->getPipeline(),
- ss->fBatchTracker);
- }
- currentState = ss;
- } else {
- iter->execute(gpu, currentState);
- }
-
- if (iter->isTraced()) {
- gpu->removeGpuTraceMarker(&newMarker);
- }
- }
-
- // TODO see copious notes about hack
- fBatchTarget.postFlush();
-}
-
-void GrTargetCommands::Draw::execute(GrGpu* gpu, const SetState* state) {
- SkASSERT(state);
- DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
- &state->fBatchTracker);
- gpu->draw(args, fInfo);
-}
-
-void GrTargetCommands::StencilPath::execute(GrGpu* gpu, const SetState*) {
- GrGpu::StencilPathState state;
- state.fRenderTarget = fRenderTarget.get();
- state.fScissor = &fScissor;
- state.fStencil = &fStencil;
- state.fUseHWAA = fUseHWAA;
- state.fViewMatrix = &fViewMatrix;
-
- gpu->stencilPath(this->path(), state);
-}
-
-void GrTargetCommands::DrawPath::execute(GrGpu* gpu, const SetState* state) {
- SkASSERT(state);
- DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
- &state->fBatchTracker);
- gpu->drawPath(args, this->path(), fStencilSettings);
-}
-
-void GrTargetCommands::DrawPaths::execute(GrGpu* gpu, const SetState* state) {
- SkASSERT(state);
- DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
- &state->fBatchTracker);
- gpu->drawPaths(args, this->pathRange(),
- fIndices, fIndexType,
- fTransforms, fTransformType,
- fCount, fStencilSettings);
-}
-
-void GrTargetCommands::DrawBatch::execute(GrGpu*, const SetState* state) {
- SkASSERT(state);
- fBatch->generateGeometry(fBatchTarget, state->getPipeline());
-}
-
-void GrTargetCommands::SetState::execute(GrGpu*, const SetState*) {}
-
-void GrTargetCommands::Clear::execute(GrGpu* gpu, const SetState*) {
- if (GrColor_ILLEGAL == fColor) {
- gpu->discard(this->renderTarget());
- } else {
- gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
- }
-}
-
-void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu, const SetState*) {
- gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
-}
-
-void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) {
- gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
-}
-
bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst,
GrSurface* src,
const SkIRect& srcRect,
@@ -784,72 +388,6 @@
return SkToBool(cmd);
}
-GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb,
- GrSurface* dst,
- GrSurface* src,
- const SkIRect& srcRect,
- const SkIPoint& dstPoint) {
- if (iodb->getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) {
- this->closeBatch();
- CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src));
- cs->fSrcRect = srcRect;
- cs->fDstPoint = dstPoint;
- return cs;
- }
- return NULL;
-}
-
-bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
- const GrPrimitiveProcessor* primProc,
- const GrDrawTarget::PipelineInfo& pipelineInfo) {
- SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (primProc));
- iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());
-
- if (ss->getPipeline()->mustSkip()) {
- fCmdBuffer.pop_back();
- return false;
- }
-
- ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
- ss->getPipeline()->getInitBatchTracker());
-
- if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
- fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
- *ss->fPrimitiveProcessor,
- ss->fBatchTracker) &&
- fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
- fCmdBuffer.pop_back();
- } else {
- fPrevState = ss;
- iodb->recordTraceMarkersIfNecessary(ss);
- }
- return true;
-}
-
-bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
- GrBatch* batch,
- const GrDrawTarget::PipelineInfo& pipelineInfo) {
- SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, ());
- iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());
-
- if (ss->getPipeline()->mustSkip()) {
- fCmdBuffer.pop_back();
- return false;
- }
-
- batch->initBatchTracker(ss->getPipeline()->getInitBatchTracker());
-
- if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
- fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
- fCmdBuffer.pop_back();
- } else {
- this->closeBatch();
- fPrevState = ss;
- iodb->recordTraceMarkersIfNecessary(ss);
- }
- return true;
-}
-
void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary(GrTargetCommands::Cmd* cmd) {
if (!cmd) {
return;
diff --git a/src/gpu/GrInOrderDrawBuffer.h b/src/gpu/GrInOrderDrawBuffer.h
index 1100d95..464fdcb 100644
--- a/src/gpu/GrInOrderDrawBuffer.h
+++ b/src/gpu/GrInOrderDrawBuffer.h
@@ -9,298 +9,8 @@
#define GrInOrderDrawBuffer_DEFINED
#include "GrFlushToGpuDrawTarget.h"
-
-#include "GrBatch.h"
-#include "GrBatchTarget.h"
+#include "GrTargetCommands.h"
#include "SkChunkAlloc.h"
-#include "GrPipeline.h"
-#include "GrPath.h"
-#include "GrTRecorder.h"
-
-class GrInOrderDrawBuffer;
-
-class GrTargetCommands : ::SkNoncopyable {
- struct SetState;
-
-public:
- GrTargetCommands(GrGpu* gpu,
- GrVertexBufferAllocPool* vertexPool,
- GrIndexBufferAllocPool* indexPool)
- : fCmdBuffer(kCmdBufferInitialSizeInBytes)
- , fPrevState(NULL)
- , fBatchTarget(gpu, vertexPool, indexPool)
- , fDrawBatch(NULL) {
- }
-
- struct Cmd : ::SkNoncopyable {
- enum {
- kDraw_Cmd = 1,
- kStencilPath_Cmd = 2,
- kSetState_Cmd = 3,
- kClear_Cmd = 4,
- kCopySurface_Cmd = 5,
- kDrawPath_Cmd = 6,
- kDrawPaths_Cmd = 7,
- kDrawBatch_Cmd = 8,
- };
-
- Cmd(uint8_t type) : fType(type) {}
- virtual ~Cmd() {}
-
- virtual void execute(GrGpu*, const SetState*) = 0;
-
- uint8_t type() const { return fType & kCmdMask; }
-
- bool isTraced() const { return SkToBool(fType & kTraceCmdBit); }
- void makeTraced() { fType |= kTraceCmdBit; }
-
- private:
- static const int kCmdMask = 0x7F;
- static const int kTraceCmdBit = 0x80;
-
- uint8_t fType;
- };
-
- void reset();
- void flush(GrInOrderDrawBuffer*);
-
- Cmd* recordClearStencilClip(GrInOrderDrawBuffer*,
- const SkIRect& rect,
- bool insideClip,
- GrRenderTarget* renderTarget);
-
- Cmd* recordDiscard(GrInOrderDrawBuffer*, GrRenderTarget*);
-
- Cmd* recordDraw(GrInOrderDrawBuffer*,
- const GrGeometryProcessor*,
- const GrDrawTarget::DrawInfo&,
- const GrDrawTarget::PipelineInfo&);
- Cmd* recordDrawBatch(GrInOrderDrawBuffer*,
- GrBatch*,
- const GrDrawTarget::PipelineInfo&);
- void recordDrawRect(GrInOrderDrawBuffer*,
- GrPipelineBuilder*,
- GrColor,
- const SkMatrix& viewMatrix,
- const SkRect& rect,
- const SkRect* localRect,
- const SkMatrix* localMatrix);
- Cmd* recordStencilPath(GrInOrderDrawBuffer*,
- const GrPipelineBuilder&,
- const GrPathProcessor*,
- const GrPath*,
- const GrScissorState&,
- const GrStencilSettings&);
- Cmd* recordDrawPath(GrInOrderDrawBuffer*,
- const GrPathProcessor*,
- const GrPath*,
- const GrStencilSettings&,
- const GrDrawTarget::PipelineInfo&);
- Cmd* recordDrawPaths(GrInOrderDrawBuffer*,
- const GrPathProcessor*,
- const GrPathRange*,
- const void*,
- GrDrawTarget::PathIndexType,
- const float transformValues[],
- GrDrawTarget::PathTransformType ,
- int,
- const GrStencilSettings&,
- const GrDrawTarget::PipelineInfo&);
- Cmd* recordClear(GrInOrderDrawBuffer*,
- const SkIRect* rect,
- GrColor,
- bool canIgnoreRect,
- GrRenderTarget*);
- Cmd* recordCopySurface(GrInOrderDrawBuffer*,
- GrSurface* dst,
- GrSurface* src,
- const SkIRect& srcRect,
- const SkIPoint& dstPoint);
-
-protected:
- void willReserveVertexAndIndexSpace(int vertexCount,
- size_t vertexStride,
- int indexCount);
-
-private:
- friend class GrInOrderDrawBuffer;
-
- typedef GrGpu::DrawArgs DrawArgs;
-
- // Attempts to concat instances from info onto the previous draw. info must represent an
- // instanced draw. The caller must have already recorded a new draw state and clip if necessary.
- int concatInstancedDraw(GrInOrderDrawBuffer*, const GrDrawTarget::DrawInfo&);
-
- bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
- const GrPrimitiveProcessor*,
- const GrDrawTarget::PipelineInfo&);
- bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
- GrBatch*,
- const GrDrawTarget::PipelineInfo&);
-
- struct Draw : public Cmd {
- Draw(const GrDrawTarget::DrawInfo& info) : Cmd(kDraw_Cmd), fInfo(info) {}
-
- void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-
- GrDrawTarget::DrawInfo fInfo;
- };
-
- struct StencilPath : public Cmd {
- StencilPath(const GrPath* path, GrRenderTarget* rt)
- : Cmd(kStencilPath_Cmd)
- , fRenderTarget(rt)
- , fPath(path) {}
-
- const GrPath* path() const { return fPath.get(); }
-
- void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-
- SkMatrix fViewMatrix;
- bool fUseHWAA;
- GrStencilSettings fStencil;
- GrScissorState fScissor;
- private:
- GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
- GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
- };
-
- struct DrawPath : public Cmd {
- DrawPath(const GrPath* path) : Cmd(kDrawPath_Cmd), fPath(path) {}
-
- const GrPath* path() const { return fPath.get(); }
-
- void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-
- GrStencilSettings fStencilSettings;
-
- private:
- GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
- };
-
- struct DrawPaths : public Cmd {
- DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRange(pathRange) {}
-
- const GrPathRange* pathRange() const { return fPathRange.get(); }
-
- void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-
- char* fIndices;
- GrDrawTarget::PathIndexType fIndexType;
- float* fTransforms;
- GrDrawTarget::PathTransformType fTransformType;
- int fCount;
- GrStencilSettings fStencilSettings;
-
- private:
- GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange;
- };
-
- // This is also used to record a discard by setting the color to GrColor_ILLEGAL
- struct Clear : public Cmd {
- Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {}
-
- GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
-
- void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-
- SkIRect fRect;
- GrColor fColor;
- bool fCanIgnoreRect;
-
- private:
- GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
- };
-
- // This command is ONLY used by the clip mask manager to clear the stencil clip bits
- struct ClearStencilClip : public Cmd {
- ClearStencilClip(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {}
-
- GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
-
- void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-
- SkIRect fRect;
- bool fInsideClip;
-
- private:
- GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
- };
-
- struct CopySurface : public Cmd {
- CopySurface(GrSurface* dst, GrSurface* src) : Cmd(kCopySurface_Cmd), fDst(dst), fSrc(src) {}
-
- GrSurface* dst() const { return fDst.get(); }
- GrSurface* src() const { return fSrc.get(); }
-
- void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-
- SkIPoint fDstPoint;
- SkIRect fSrcRect;
-
- private:
- GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst;
- GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc;
- };
-
- // TODO: rename to SetPipeline once pp, batch tracker, and desc are removed
- struct SetState : public Cmd {
- // TODO get rid of the prim proc parameter when we use batch everywhere
- SetState(const GrPrimitiveProcessor* primProc = NULL)
- : Cmd(kSetState_Cmd)
- , fPrimitiveProcessor(primProc) {}
-
- ~SetState() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); }
-
- // This function is only for getting the location in memory where we will create our
- // pipeline object.
- GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); }
-
- const GrPipeline* getPipeline() const {
- return reinterpret_cast<const GrPipeline*>(fPipeline.get());
- }
-
- void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-
- typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
- ProgramPrimitiveProcessor fPrimitiveProcessor;
- SkAlignedSStorage<sizeof(GrPipeline)> fPipeline;
- GrProgramDesc fDesc;
- GrBatchTracker fBatchTracker;
- };
-
- struct DrawBatch : public Cmd {
- DrawBatch(GrBatch* batch, GrBatchTarget* batchTarget)
- : Cmd(kDrawBatch_Cmd)
- , fBatch(SkRef(batch))
- , fBatchTarget(batchTarget) {
- SkASSERT(!batch->isUsed());
- }
-
- void execute(GrGpu*, const SetState*) SK_OVERRIDE;
-
- // TODO it wouldn't be too hard to let batches allocate in the cmd buffer
- SkAutoTUnref<GrBatch> fBatch;
-
- private:
- GrBatchTarget* fBatchTarget;
- };
-
- static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
-
- typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
- typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
-
- CmdBuffer fCmdBuffer;
- SetState* fPrevState;
- GrBatchTarget fBatchTarget;
- // TODO hack until batch is everywhere
- GrTargetCommands::DrawBatch* fDrawBatch;
-
- // This will go away when everything uses batch. However, in the short term anything which
- // might be put into the GrInOrderDrawBuffer needs to make sure it closes the last batch
- void closeBatch();
-};
/**
* GrInOrderDrawBuffer is an implementation of GrDrawTarget that queues up draws for eventual
@@ -382,8 +92,6 @@
private:
friend class GrTargetCommands;
- typedef GrGpu::DrawArgs DrawArgs;
-
void onReset() SK_OVERRIDE;
void onFlush() SK_OVERRIDE;
diff --git a/src/gpu/GrTargetCommands.cpp b/src/gpu/GrTargetCommands.cpp
new file mode 100644
index 0000000..d326226
--- /dev/null
+++ b/src/gpu/GrTargetCommands.cpp
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrTargetCommands.h"
+
+#include "GrColor.h"
+#include "GrDefaultGeoProcFactory.h"
+#include "GrInOrderDrawBuffer.h"
+#include "GrTemplates.h"
+#include "SkPoint.h"
+
+void GrTargetCommands::closeBatch() {
+ if (fDrawBatch) {
+ fBatchTarget.resetNumberOfDraws();
+ fDrawBatch->execute(NULL, fPrevState);
+ fDrawBatch->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
+ fDrawBatch = NULL;
+ }
+}
+
+static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettings) {
+ static const GrStencilSettings::Face pathFace = GrStencilSettings::kFront_Face;
+ bool isWinding = kInvert_StencilOp != pathStencilSettings.passOp(pathFace);
+ if (isWinding) {
+ // Double check that it is in fact winding.
+ SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace));
+ SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace));
+ SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace));
+ SkASSERT(!pathStencilSettings.isTwoSided());
+ }
+ return isWinding;
+}
+
+int GrTargetCommands::concatInstancedDraw(GrInOrderDrawBuffer* iodb,
+ const GrDrawTarget::DrawInfo& info) {
+ SkASSERT(!fCmdBuffer.empty());
+ SkASSERT(info.isInstanced());
+
+ const GrIndexBuffer* ib;
+ if (!iodb->canConcatToIndexBuffer(&ib)) {
+ return 0;
+ }
+
+ // Check if there is a compatible draw info that uses the same VB from the pool and
+ // the same IB.
+ if (Cmd::kDraw_Cmd != fCmdBuffer.back().type()) {
+ return 0;
+ }
+
+ Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
+
+ if (!draw->fInfo.isInstanced() ||
+ draw->fInfo.primitiveType() != info.primitiveType() ||
+ draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
+ draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
+ draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
+ draw->fInfo.indexBuffer() != ib) {
+ return 0;
+ }
+ if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) {
+ return 0;
+ }
+
+ // how many instances can be concat'ed onto draw given the size of the index buffer
+ int instancesToConcat = iodb->indexCountInCurrentSource() / info.indicesPerInstance();
+ instancesToConcat -= draw->fInfo.instanceCount();
+ instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());
+
+ draw->fInfo.adjustInstanceCount(instancesToConcat);
+
+ // update last fGpuCmdMarkers to include any additional trace markers that have been added
+ iodb->recordTraceMarkersIfNecessary(draw);
+ return instancesToConcat;
+}
+
+GrTargetCommands::Cmd* GrTargetCommands::recordDraw(
+ GrInOrderDrawBuffer* iodb,
+ const GrGeometryProcessor* gp,
+ const GrDrawTarget::DrawInfo& info,
+ const GrDrawTarget::PipelineInfo& pipelineInfo) {
+ SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer()));
+ this->closeBatch();
+
+ if (!this->setupPipelineAndShouldDraw(iodb, gp, pipelineInfo)) {
+ return NULL;
+ }
+
+ Draw* draw;
+ if (info.isInstanced()) {
+ int instancesConcated = this->concatInstancedDraw(iodb, info);
+ if (info.instanceCount() > instancesConcated) {
+ draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
+ draw->fInfo.adjustInstanceCount(-instancesConcated);
+ } else {
+ return NULL;
+ }
+ } else {
+ draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
+ }
+
+ return draw;
+}
+
+GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(
+ GrInOrderDrawBuffer* iodb,
+ GrBatch* batch,
+ const GrDrawTarget::PipelineInfo& pipelineInfo) {
+ if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) {
+ return NULL;
+ }
+
+ // Check if there is a Batch Draw we can batch with
+ if (Cmd::kDrawBatch_Cmd != fCmdBuffer.back().type() || !fDrawBatch) {
+ fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
+ return fDrawBatch;
+ }
+
+ SkASSERT(&fCmdBuffer.back() == fDrawBatch);
+ if (!fDrawBatch->fBatch->combineIfPossible(batch)) {
+ this->closeBatch();
+ fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
+ }
+
+ return fDrawBatch;
+}
+
+GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath(
+ GrInOrderDrawBuffer* iodb,
+ const GrPipelineBuilder& pipelineBuilder,
+ const GrPathProcessor* pathProc,
+ const GrPath* path,
+ const GrScissorState& scissorState,
+ const GrStencilSettings& stencilSettings) {
+ this->closeBatch();
+
+ StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath,
+ (path, pipelineBuilder.getRenderTarget()));
+
+ sp->fScissor = scissorState;
+ sp->fUseHWAA = pipelineBuilder.isHWAntialias();
+ sp->fViewMatrix = pathProc->viewMatrix();
+ sp->fStencil = stencilSettings;
+ return sp;
+}
+
+GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath(
+ GrInOrderDrawBuffer* iodb,
+ const GrPathProcessor* pathProc,
+ const GrPath* path,
+ const GrStencilSettings& stencilSettings,
+ const GrDrawTarget::PipelineInfo& pipelineInfo) {
+ this->closeBatch();
+
+ // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering?
+ if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
+ return NULL;
+ }
+ DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
+ dp->fStencilSettings = stencilSettings;
+ return dp;
+}
+
+GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
+ GrInOrderDrawBuffer* iodb,
+ const GrPathProcessor* pathProc,
+ const GrPathRange* pathRange,
+ const void* indexValues,
+ GrDrawTarget::PathIndexType indexType,
+ const float transformValues[],
+ GrDrawTarget::PathTransformType transformType,
+ int count,
+ const GrStencilSettings& stencilSettings,
+ const GrDrawTarget::PipelineInfo& pipelineInfo) {
+ SkASSERT(pathRange);
+ SkASSERT(indexValues);
+ SkASSERT(transformValues);
+ this->closeBatch();
+
+ if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
+ return NULL;
+ }
+
+ char* savedIndices;
+ float* savedTransforms;
+
+ iodb->appendIndicesAndTransforms(indexValues, indexType,
+ transformValues, transformType,
+ count, &savedIndices, &savedTransforms);
+
+ if (Cmd::kDrawPaths_Cmd == fCmdBuffer.back().type()) {
+ // The previous command was also DrawPaths. Try to collapse this call into the one
+ // before. Note that stenciling all the paths at once, then covering, may not be
+ // equivalent to two separate draw calls if there is overlap. Blending won't work,
+ // and the combined calls may also cancel each other's winding numbers in some
+ // places. For now the winding numbers are only an issue if the fill is even/odd,
+ // because DrawPaths is currently only used for glyphs, and glyphs in the same
+ // font tend to all wind in the same direction.
+ DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back());
+ if (pathRange == previous->pathRange() &&
+ indexType == previous->fIndexType &&
+ transformType == previous->fTransformType &&
+ stencilSettings == previous->fStencilSettings &&
+ path_fill_type_is_winding(stencilSettings) &&
+ !pipelineInfo.willBlendWithDst(pathProc)) {
+ const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
+ const int xformSize = GrPathRendering::PathTransformSize(transformType);
+ if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
+ (0 == xformSize ||
+ &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
+ // Fold this DrawPaths call into the one previous.
+ previous->fCount += count;
+ return NULL;
+ }
+ }
+ }
+
+ DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange));
+ dp->fIndices = savedIndices;
+ dp->fIndexType = indexType;
+ dp->fTransforms = savedTransforms;
+ dp->fTransformType = transformType;
+ dp->fCount = count;
+ dp->fStencilSettings = stencilSettings;
+ return dp;
+}
+
+GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb,
+ const SkIRect* rect,
+ GrColor color,
+ bool canIgnoreRect,
+ GrRenderTarget* renderTarget) {
+ SkASSERT(renderTarget);
+ this->closeBatch();
+
+ SkIRect r;
+ if (NULL == rect) {
+ // We could do something smart and remove previous draws and clears to
+ // the current render target. If we get that smart we have to make sure
+ // those draws aren't read before this clear (render-to-texture).
+ r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
+ rect = &r;
+ }
+ Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
+ GrColorIsPMAssert(color);
+ clr->fColor = color;
+ clr->fRect = *rect;
+ clr->fCanIgnoreRect = canIgnoreRect;
+ return clr;
+}
+
+GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuffer* iodb,
+ const SkIRect& rect,
+ bool insideClip,
+ GrRenderTarget* renderTarget) {
+ SkASSERT(renderTarget);
+ this->closeBatch();
+
+ ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget));
+ clr->fRect = rect;
+ clr->fInsideClip = insideClip;
+ return clr;
+}
+
+GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb,
+ GrRenderTarget* renderTarget) {
+ SkASSERT(renderTarget);
+ this->closeBatch();
+
+ Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
+ clr->fColor = GrColor_ILLEGAL;
+ return clr;
+}
+
+void GrTargetCommands::reset() {
+ fCmdBuffer.reset();
+ fPrevState = NULL;
+ fDrawBatch = NULL;
+}
+
+void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
+ if (fCmdBuffer.empty()) {
+ return;
+ }
+
+ // Updated every time we find a set state cmd to reflect the current state in the playback
+ // stream.
+ SetState* currentState = NULL;
+
+ // TODO this is temporary while batch is being rolled out
+ this->closeBatch();
+ iodb->getVertexAllocPool()->unmap();
+ iodb->getIndexAllocPool()->unmap();
+ fBatchTarget.preFlush();
+
+ currentState = NULL;
+ CmdBuffer::Iter iter(fCmdBuffer);
+
+ int currCmdMarker = 0;
+
+ GrGpu* gpu = iodb->getGpu();
+
+ int i = 0;
+ while (iter.next()) {
+ i++;
+ GrGpuTraceMarker newMarker("", -1);
+ SkString traceString;
+ if (iter->isTraced()) {
+ traceString = iodb->getCmdString(currCmdMarker);
+ newMarker.fMarker = traceString.c_str();
+ gpu->addGpuTraceMarker(&newMarker);
+ ++currCmdMarker;
+ }
+
+ // TODO temporary hack
+ if (Cmd::kDrawBatch_Cmd == iter->type()) {
+ DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get());
+ fBatchTarget.flushNext(db->fBatch->numberOfDraws());
+ continue;
+ }
+
+ if (Cmd::kSetState_Cmd == iter->type()) {
+ SetState* ss = reinterpret_cast<SetState*>(iter.get());
+
+ // TODO sometimes we have a prim proc, other times we have a GrBatch. Eventually we
+ // will only have GrBatch and we can delete this
+ if (ss->fPrimitiveProcessor) {
+ gpu->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor,
+ *ss->getPipeline(),
+ ss->fBatchTracker);
+ }
+ currentState = ss;
+ } else {
+ iter->execute(gpu, currentState);
+ }
+
+ if (iter->isTraced()) {
+ gpu->removeGpuTraceMarker(&newMarker);
+ }
+ }
+
+ // TODO see copious notes about hack
+ fBatchTarget.postFlush();
+}
+
+void GrTargetCommands::Draw::execute(GrGpu* gpu, const SetState* state) {
+ SkASSERT(state);
+ DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
+ &state->fBatchTracker);
+ gpu->draw(args, fInfo);
+}
+
+void GrTargetCommands::StencilPath::execute(GrGpu* gpu, const SetState*) {
+ GrGpu::StencilPathState state;
+ state.fRenderTarget = fRenderTarget.get();
+ state.fScissor = &fScissor;
+ state.fStencil = &fStencil;
+ state.fUseHWAA = fUseHWAA;
+ state.fViewMatrix = &fViewMatrix;
+
+ gpu->stencilPath(this->path(), state);
+}
+
+void GrTargetCommands::DrawPath::execute(GrGpu* gpu, const SetState* state) {
+ SkASSERT(state);
+ DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
+ &state->fBatchTracker);
+ gpu->drawPath(args, this->path(), fStencilSettings);
+}
+
+void GrTargetCommands::DrawPaths::execute(GrGpu* gpu, const SetState* state) {
+ SkASSERT(state);
+ DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
+ &state->fBatchTracker);
+ gpu->drawPaths(args, this->pathRange(),
+ fIndices, fIndexType,
+ fTransforms, fTransformType,
+ fCount, fStencilSettings);
+}
+
+void GrTargetCommands::DrawBatch::execute(GrGpu*, const SetState* state) {
+ SkASSERT(state);
+ fBatch->generateGeometry(fBatchTarget, state->getPipeline());
+}
+
+void GrTargetCommands::SetState::execute(GrGpu*, const SetState*) {}
+
+void GrTargetCommands::Clear::execute(GrGpu* gpu, const SetState*) {
+ if (GrColor_ILLEGAL == fColor) {
+ gpu->discard(this->renderTarget());
+ } else {
+ gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
+ }
+}
+
+void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu, const SetState*) {
+ gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
+}
+
+void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) {
+ gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
+}
+
+GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb,
+ GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ if (iodb->getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) {
+ this->closeBatch();
+ CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src));
+ cs->fSrcRect = srcRect;
+ cs->fDstPoint = dstPoint;
+ return cs;
+ }
+ return NULL;
+}
+
+bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
+ const GrPrimitiveProcessor* primProc,
+ const GrDrawTarget::PipelineInfo& pipelineInfo) {
+ SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (primProc));
+ iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());
+
+ if (ss->getPipeline()->mustSkip()) {
+ fCmdBuffer.pop_back();
+ return false;
+ }
+
+ ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
+ ss->getPipeline()->getInitBatchTracker());
+
+ if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
+ fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
+ *ss->fPrimitiveProcessor,
+ ss->fBatchTracker) &&
+ fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
+ fCmdBuffer.pop_back();
+ } else {
+ fPrevState = ss;
+ iodb->recordTraceMarkersIfNecessary(ss);
+ }
+ return true;
+}
+
+bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
+ GrBatch* batch,
+ const GrDrawTarget::PipelineInfo& pipelineInfo) {
+ SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, ());
+ iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());
+
+ if (ss->getPipeline()->mustSkip()) {
+ fCmdBuffer.pop_back();
+ return false;
+ }
+
+ batch->initBatchTracker(ss->getPipeline()->getInitBatchTracker());
+
+ if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
+ fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
+ fCmdBuffer.pop_back();
+ } else {
+ this->closeBatch();
+ fPrevState = ss;
+ iodb->recordTraceMarkersIfNecessary(ss);
+ }
+ return true;
+}
+
diff --git a/src/gpu/GrTargetCommands.h b/src/gpu/GrTargetCommands.h
new file mode 100644
index 0000000..b89f581
--- /dev/null
+++ b/src/gpu/GrTargetCommands.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrTargetCommands_DEFINED
+#define GrTargetCommands_DEFINED
+
+#include "GrBatchTarget.h"
+#include "GrDrawTarget.h"
+#include "GrGpu.h"
+#include "GrPath.h"
+#include "GrPendingProgramElement.h"
+#include "GrRenderTarget.h"
+#include "GrTRecorder.h"
+#include "SkRect.h"
+#include "SkTypes.h"
+
+class GrInOrderDrawBuffer;
+class GrVertexBufferAllocPool;
+class GrIndexBufferAllocPool;
+
+class GrTargetCommands : ::SkNoncopyable {
+ struct SetState;
+
+public:
+ GrTargetCommands(GrGpu* gpu,
+ GrVertexBufferAllocPool* vertexPool,
+ GrIndexBufferAllocPool* indexPool)
+ : fCmdBuffer(kCmdBufferInitialSizeInBytes)
+ , fPrevState(NULL)
+ , fBatchTarget(gpu, vertexPool, indexPool)
+ , fDrawBatch(NULL) {
+ }
+
+ struct Cmd : ::SkNoncopyable {
+ enum {
+ kDraw_Cmd = 1,
+ kStencilPath_Cmd = 2,
+ kSetState_Cmd = 3,
+ kClear_Cmd = 4,
+ kCopySurface_Cmd = 5,
+ kDrawPath_Cmd = 6,
+ kDrawPaths_Cmd = 7,
+ kDrawBatch_Cmd = 8,
+ };
+
+ Cmd(uint8_t type) : fType(type) {}
+ virtual ~Cmd() {}
+
+ virtual void execute(GrGpu*, const SetState*) = 0;
+
+ uint8_t type() const { return fType & kCmdMask; }
+
+ bool isTraced() const { return SkToBool(fType & kTraceCmdBit); }
+ void makeTraced() { fType |= kTraceCmdBit; }
+
+ private:
+ static const int kCmdMask = 0x7F;
+ static const int kTraceCmdBit = 0x80;
+
+ uint8_t fType;
+ };
+
+ void reset();
+ void flush(GrInOrderDrawBuffer*);
+
+ Cmd* recordClearStencilClip(GrInOrderDrawBuffer*,
+ const SkIRect& rect,
+ bool insideClip,
+ GrRenderTarget* renderTarget);
+
+ Cmd* recordDiscard(GrInOrderDrawBuffer*, GrRenderTarget*);
+
+ Cmd* recordDraw(GrInOrderDrawBuffer*,
+ const GrGeometryProcessor*,
+ const GrDrawTarget::DrawInfo&,
+ const GrDrawTarget::PipelineInfo&);
+ Cmd* recordDrawBatch(GrInOrderDrawBuffer*,
+ GrBatch*,
+ const GrDrawTarget::PipelineInfo&);
+ void recordDrawRect(GrInOrderDrawBuffer*,
+ GrPipelineBuilder*,
+ GrColor,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect* localRect,
+ const SkMatrix* localMatrix);
+ Cmd* recordStencilPath(GrInOrderDrawBuffer*,
+ const GrPipelineBuilder&,
+ const GrPathProcessor*,
+ const GrPath*,
+ const GrScissorState&,
+ const GrStencilSettings&);
+ Cmd* recordDrawPath(GrInOrderDrawBuffer*,
+ const GrPathProcessor*,
+ const GrPath*,
+ const GrStencilSettings&,
+ const GrDrawTarget::PipelineInfo&);
+ Cmd* recordDrawPaths(GrInOrderDrawBuffer*,
+ const GrPathProcessor*,
+ const GrPathRange*,
+ const void*,
+ GrDrawTarget::PathIndexType,
+ const float transformValues[],
+ GrDrawTarget::PathTransformType,
+ int,
+ const GrStencilSettings&,
+ const GrDrawTarget::PipelineInfo&);
+ Cmd* recordClear(GrInOrderDrawBuffer*,
+ const SkIRect* rect,
+ GrColor,
+ bool canIgnoreRect,
+ GrRenderTarget*);
+ Cmd* recordCopySurface(GrInOrderDrawBuffer*,
+ GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+protected:
+ void willReserveVertexAndIndexSpace(int vertexCount,
+ size_t vertexStride,
+ int indexCount);
+
+private:
+ friend class GrInOrderDrawBuffer;
+
+ typedef GrGpu::DrawArgs DrawArgs;
+
+ // Attempts to concat instances from info onto the previous draw. info must represent an
+ // instanced draw. The caller must have already recorded a new draw state and clip if necessary.
+ int concatInstancedDraw(GrInOrderDrawBuffer*, const GrDrawTarget::DrawInfo&);
+
+ bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
+ const GrPrimitiveProcessor*,
+ const GrDrawTarget::PipelineInfo&);
+ bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
+ GrBatch*,
+ const GrDrawTarget::PipelineInfo&);
+
+ struct Draw : public Cmd {
+ Draw(const GrDrawTarget::DrawInfo& info) : Cmd(kDraw_Cmd), fInfo(info) {}
+
+ void execute(GrGpu*, const SetState*) SK_OVERRIDE;
+
+ GrDrawTarget::DrawInfo fInfo;
+ };
+
+ struct StencilPath : public Cmd {
+ StencilPath(const GrPath* path, GrRenderTarget* rt)
+ : Cmd(kStencilPath_Cmd)
+ , fRenderTarget(rt)
+ , fPath(path) {}
+
+ const GrPath* path() const { return fPath.get(); }
+
+ void execute(GrGpu*, const SetState*) SK_OVERRIDE;
+
+ SkMatrix fViewMatrix;
+ bool fUseHWAA;
+ GrStencilSettings fStencil;
+ GrScissorState fScissor;
+ private:
+ GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
+ GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
+ };
+
+ struct DrawPath : public Cmd {
+ DrawPath(const GrPath* path) : Cmd(kDrawPath_Cmd), fPath(path) {}
+
+ const GrPath* path() const { return fPath.get(); }
+
+ void execute(GrGpu*, const SetState*) SK_OVERRIDE;
+
+ GrStencilSettings fStencilSettings;
+
+ private:
+ GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
+ };
+
+ struct DrawPaths : public Cmd {
+ DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_Cmd), fPathRange(pathRange) {}
+
+ const GrPathRange* pathRange() const { return fPathRange.get(); }
+
+ void execute(GrGpu*, const SetState*) SK_OVERRIDE;
+
+ char* fIndices;
+ GrDrawTarget::PathIndexType fIndexType;
+ float* fTransforms;
+ GrDrawTarget::PathTransformType fTransformType;
+ int fCount;
+ GrStencilSettings fStencilSettings;
+
+ private:
+ GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange;
+ };
+
+ // This is also used to record a discard by setting the color to GrColor_ILLEGAL
+ struct Clear : public Cmd {
+ Clear(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {}
+
+ GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
+
+ void execute(GrGpu*, const SetState*) SK_OVERRIDE;
+
+ SkIRect fRect;
+ GrColor fColor;
+ bool fCanIgnoreRect;
+
+ private:
+ GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
+ };
+
+ // This command is ONLY used by the clip mask manager to clear the stencil clip bits
+ struct ClearStencilClip : public Cmd {
+ ClearStencilClip(GrRenderTarget* rt) : Cmd(kClear_Cmd), fRenderTarget(rt) {}
+
+ GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
+
+ void execute(GrGpu*, const SetState*) SK_OVERRIDE;
+
+ SkIRect fRect;
+ bool fInsideClip;
+
+ private:
+ GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
+ };
+
+ struct CopySurface : public Cmd {
+ CopySurface(GrSurface* dst, GrSurface* src) : Cmd(kCopySurface_Cmd), fDst(dst), fSrc(src) {}
+
+ GrSurface* dst() const { return fDst.get(); }
+ GrSurface* src() const { return fSrc.get(); }
+
+ void execute(GrGpu*, const SetState*) SK_OVERRIDE;
+
+ SkIPoint fDstPoint;
+ SkIRect fSrcRect;
+
+ private:
+ GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst;
+ GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc;
+ };
+
+ // TODO: rename to SetPipeline once pp, batch tracker, and desc are removed
+ struct SetState : public Cmd {
+ // TODO get rid of the prim proc parameter when we use batch everywhere
+ SetState(const GrPrimitiveProcessor* primProc = NULL)
+ : Cmd(kSetState_Cmd)
+ , fPrimitiveProcessor(primProc) {}
+
+ ~SetState() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); }
+
+ // This function is only for getting the location in memory where we will create our
+ // pipeline object.
+ GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); }
+
+ const GrPipeline* getPipeline() const {
+ return reinterpret_cast<const GrPipeline*>(fPipeline.get());
+ }
+
+ void execute(GrGpu*, const SetState*) SK_OVERRIDE;
+
+ typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
+ ProgramPrimitiveProcessor fPrimitiveProcessor;
+ SkAlignedSStorage<sizeof(GrPipeline)> fPipeline;
+ GrProgramDesc fDesc;
+ GrBatchTracker fBatchTracker;
+ };
+
+ struct DrawBatch : public Cmd {
+ DrawBatch(GrBatch* batch, GrBatchTarget* batchTarget)
+ : Cmd(kDrawBatch_Cmd)
+ , fBatch(SkRef(batch))
+ , fBatchTarget(batchTarget) {
+ SkASSERT(!batch->isUsed());
+ }
+
+ void execute(GrGpu*, const SetState*) SK_OVERRIDE;
+
+ // TODO it wouldn't be too hard to let batches allocate in the cmd buffer
+ SkAutoTUnref<GrBatch> fBatch;
+
+ private:
+ GrBatchTarget* fBatchTarget;
+ };
+
+ static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
+
+ typedef void* TCmdAlign; // This wouldn't be enough alignment if a command used long double.
+ typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
+
+ CmdBuffer fCmdBuffer;
+ SetState* fPrevState;
+ GrBatchTarget fBatchTarget;
+ // TODO hack until batch is everywhere
+ GrTargetCommands::DrawBatch* fDrawBatch;
+
+ // This will go away when everything uses batch. However, in the short term anything which
+ // might be put into the GrInOrderDrawBuffer needs to make sure it closes the last batch
+ void closeBatch();
+};
+
+#endif
+