Check for xfer barriers in GrBatch, auto-issue barriers in GrGpu
Review URL: https://codereview.chromium.org/1287973003
diff --git a/src/gpu/GrAAConvexPathRenderer.cpp b/src/gpu/GrAAConvexPathRenderer.cpp
index 9850ddc..3d2ff45 100644
--- a/src/gpu/GrAAConvexPathRenderer.cpp
+++ b/src/gpu/GrAAConvexPathRenderer.cpp
@@ -940,8 +940,9 @@
geometry.fViewMatrix.mapRect(&fBounds);
}
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/GrAADistanceFieldPathRenderer.cpp b/src/gpu/GrAADistanceFieldPathRenderer.cpp
index 1858f00..0428373 100755
--- a/src/gpu/GrAADistanceFieldPathRenderer.cpp
+++ b/src/gpu/GrAADistanceFieldPathRenderer.cpp
@@ -492,8 +492,9 @@
const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/GrAAHairLinePathRenderer.cpp b/src/gpu/GrAAHairLinePathRenderer.cpp
index 1a3318b..df0095e 100644
--- a/src/gpu/GrAAHairLinePathRenderer.cpp
+++ b/src/gpu/GrAAHairLinePathRenderer.cpp
@@ -731,8 +731,9 @@
fBounds.outset(0.5f, 0.5f);
}
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/GrAALinearizingConvexPathRenderer.cpp b/src/gpu/GrAALinearizingConvexPathRenderer.cpp
index b4abc01..d1c6355 100644
--- a/src/gpu/GrAALinearizingConvexPathRenderer.cpp
+++ b/src/gpu/GrAALinearizingConvexPathRenderer.cpp
@@ -264,8 +264,9 @@
geometry.fViewMatrix.mapRect(&fBounds);
}
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/GrAtlasTextContext.cpp b/src/gpu/GrAtlasTextContext.cpp
index f33e2f9..4337d81 100644
--- a/src/gpu/GrAtlasTextContext.cpp
+++ b/src/gpu/GrAtlasTextContext.cpp
@@ -1893,8 +1893,9 @@
bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
int numGlyphs() const { return fBatch.fNumGlyphs; }
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/GrBufferedDrawTarget.cpp b/src/gpu/GrBufferedDrawTarget.cpp
index 793ffd1..ef24ce2 100644
--- a/src/gpu/GrBufferedDrawTarget.cpp
+++ b/src/gpu/GrBufferedDrawTarget.cpp
@@ -27,8 +27,7 @@
}
void GrBufferedDrawTarget::onDrawBatch(GrBatch* batch) {
- fCommands->recordXferBarrierIfNecessary(*batch->pipeline(), *this->caps());
- fCommands->recordDrawBatch(batch);
+ fCommands->recordDrawBatch(batch, *this->caps());
}
void GrBufferedDrawTarget::onStencilPath(const GrPipelineBuilder& pipelineBuilder,
@@ -131,12 +130,11 @@
fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
*state->fPrimitiveProcessor,
state->fBatchTracker) &&
- fPrevState->getPipeline()->isEqual(*state->getPipeline())) {
+ GrPipeline::AreEqual(*fPrevState->getPipeline(), *state->getPipeline(), false)) {
this->unallocState(state);
} else {
fPrevState.reset(state);
}
- fCommands->recordXferBarrierIfNecessary(*fPrevState->getPipeline(), *this->caps());
return fPrevState;
}
diff --git a/src/gpu/GrCommandBuilder.cpp b/src/gpu/GrCommandBuilder.cpp
index 81e988c..cfd2225 100644
--- a/src/gpu/GrCommandBuilder.cpp
+++ b/src/gpu/GrCommandBuilder.cpp
@@ -65,20 +65,3 @@
GrBATCH_INFO("Recording copysurface %d\n", cs->uniqueID());
return cs;
}
-
-GrTargetCommands::Cmd*
-GrCommandBuilder::recordXferBarrierIfNecessary(const GrPipeline& pipeline,
- const GrCaps& caps) {
- const GrXferProcessor& xp = *pipeline.getXferProcessor();
- GrRenderTarget* rt = pipeline.getRenderTarget();
-
- GrXferBarrierType barrierType;
- if (!xp.willNeedXferBarrier(rt, caps, &barrierType)) {
- return NULL;
- }
-
- XferBarrier* xb = GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), XferBarrier, (rt));
- xb->fBarrierType = barrierType;
- GrBATCH_INFO("Recording xfer barrier %d\n", xb->uniqueID());
- return xb;
-}
diff --git a/src/gpu/GrCommandBuilder.h b/src/gpu/GrCommandBuilder.h
index 1b6d5e0..15237fb 100644
--- a/src/gpu/GrCommandBuilder.h
+++ b/src/gpu/GrCommandBuilder.h
@@ -28,7 +28,7 @@
bool insideClip,
GrRenderTarget* renderTarget);
virtual Cmd* recordDiscard(GrRenderTarget*);
- virtual Cmd* recordDrawBatch(GrBatch*) = 0;
+ virtual Cmd* recordDrawBatch(GrBatch*, const GrCaps&) = 0;
virtual Cmd* recordStencilPath(const GrPipelineBuilder&,
const GrPathProcessor*,
const GrPath*,
@@ -56,7 +56,6 @@
GrSurface* src,
const SkIRect& srcRect,
const SkIPoint& dstPoint);
- virtual Cmd* recordXferBarrierIfNecessary(const GrPipeline&, const GrCaps&);
protected:
typedef GrTargetCommands::DrawBatch DrawBatch;
@@ -66,7 +65,6 @@
typedef GrTargetCommands::Clear Clear;
typedef GrTargetCommands::ClearStencilClip ClearStencilClip;
typedef GrTargetCommands::CopySurface CopySurface;
- typedef GrTargetCommands::XferBarrier XferBarrier;
GrCommandBuilder(GrGpu* gpu) : fCommands(gpu) {}
diff --git a/src/gpu/GrDefaultPathRenderer.cpp b/src/gpu/GrDefaultPathRenderer.cpp
index 0678438..50fd20a 100644
--- a/src/gpu/GrDefaultPathRenderer.cpp
+++ b/src/gpu/GrDefaultPathRenderer.cpp
@@ -389,8 +389,9 @@
this->setBounds(devBounds);
}
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index 64016cf..9c468e7 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -14,6 +14,7 @@
#include "GrGpuResourcePriv.h"
#include "GrIndexBuffer.h"
#include "GrPathRendering.h"
+#include "GrPipeline.h"
#include "GrResourceCache.h"
#include "GrRenderTargetPriv.h"
#include "GrStencilAttachment.h"
@@ -381,6 +382,10 @@
void GrGpu::draw(const DrawArgs& args, const GrVertices& vertices) {
this->handleDirtyContext();
+ if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*this->caps())) {
+ this->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
+ }
+
GrVertices::Iterator iter;
const GrNonInstancedVertices* verts = iter.init(vertices);
do {
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index cc3a92a..69a4ec9 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -299,9 +299,6 @@
const SkIRect& srcRect,
const SkIPoint& dstPoint) = 0;
- // Called before certain draws in order to guarantee coherent results from dst reads.
- virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
-
struct DrawArgs {
DrawArgs(const GrPrimitiveProcessor* primProc,
const GrPipeline* pipeline,
@@ -432,6 +429,9 @@
// assumed 3D context state and dirty any state cache.
virtual void onResetContext(uint32_t resetBits) = 0;
+ // Called before certain draws in order to guarantee coherent results from dst reads.
+ virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
+
// overridden by backend-specific derived class to create objects.
// Texture size and sample size will have already been validated in base class before
// onCreateTexture/CompressedTexture are called.
diff --git a/src/gpu/GrImmediateDrawTarget.cpp b/src/gpu/GrImmediateDrawTarget.cpp
index 8088eae..999446f 100644
--- a/src/gpu/GrImmediateDrawTarget.cpp
+++ b/src/gpu/GrImmediateDrawTarget.cpp
@@ -26,14 +26,6 @@
}
void GrImmediateDrawTarget::onDrawBatch(GrBatch* batch) {
- const GrXferProcessor& xp = *batch->pipeline()->getXferProcessor();
- GrRenderTarget* rt = batch->pipeline()->getRenderTarget();
-
- GrXferBarrierType barrierType;
- if (xp.willNeedXferBarrier(rt, *this->caps(), &barrierType)) {
- this->getGpu()->xferBarrier(rt, barrierType);
- }
-
fBatchTarget.resetNumberOfDraws();
batch->generateGeometry(&fBatchTarget);
diff --git a/src/gpu/GrInOrderCommandBuilder.cpp b/src/gpu/GrInOrderCommandBuilder.cpp
index 89a2502..0fc7eff 100644
--- a/src/gpu/GrInOrderCommandBuilder.cpp
+++ b/src/gpu/GrInOrderCommandBuilder.cpp
@@ -25,12 +25,13 @@
return isWinding;
}
-GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(GrBatch* batch) {
+GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(GrBatch* batch,
+ const GrCaps& caps) {
GrBATCH_INFO("In-Recording (%s, %u)\n", batch->name(), batch->uniqueID());
if (!this->cmdBuffer()->empty() &&
Cmd::kDrawBatch_CmdType == this->cmdBuffer()->back().type()) {
DrawBatch* previous = static_cast<DrawBatch*>(&this->cmdBuffer()->back());
- if (previous->batch()->combineIfPossible(batch)) {
+ if (previous->batch()->combineIfPossible(batch, caps)) {
GrBATCH_INFO("\tBatching with (%s, %u)\n",
previous->fBatch->name(), previous->fBatch->uniqueID());
return NULL;
diff --git a/src/gpu/GrInOrderCommandBuilder.h b/src/gpu/GrInOrderCommandBuilder.h
index 1deccfa..b50a0c6 100644
--- a/src/gpu/GrInOrderCommandBuilder.h
+++ b/src/gpu/GrInOrderCommandBuilder.h
@@ -17,7 +17,7 @@
GrInOrderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) { }
- Cmd* recordDrawBatch(GrBatch*) override;
+ Cmd* recordDrawBatch(GrBatch*, const GrCaps&) override;
Cmd* recordStencilPath(const GrPipelineBuilder&,
const GrPathProcessor*,
const GrPath*,
diff --git a/src/gpu/GrOvalRenderer.cpp b/src/gpu/GrOvalRenderer.cpp
index 856b952..fa51bb2 100644
--- a/src/gpu/GrOvalRenderer.cpp
+++ b/src/gpu/GrOvalRenderer.cpp
@@ -733,8 +733,9 @@
this->setBounds(geometry.fDevBounds);
}
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
@@ -956,8 +957,9 @@
this->setBounds(geometry.fDevBounds);
}
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
@@ -1214,8 +1216,9 @@
this->setBounds(bounds);
}
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
@@ -1591,8 +1594,9 @@
this->setBounds(geometry.fDevBounds);
}
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
@@ -1777,8 +1781,9 @@
this->setBounds(geometry.fDevBounds);
}
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/GrPathRendering.h b/src/gpu/GrPathRendering.h
index 527a843..f13a190 100644
--- a/src/gpu/GrPathRendering.h
+++ b/src/gpu/GrPathRendering.h
@@ -11,6 +11,7 @@
#include "SkPath.h"
#include "GrGpu.h"
#include "GrPathRange.h"
+#include "GrPipeline.h"
class SkDescriptor;
class SkTypeface;
@@ -166,6 +167,9 @@
void drawPath(const DrawPathArgs& args, const GrPath* path) {
fGpu->handleDirtyContext();
+ if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*fGpu->caps())) {
+ fGpu->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
+ }
this->onDrawPath(args, path);
}
@@ -173,10 +177,14 @@
PathIndexType indexType, const float transformValues[],
PathTransformType transformType, int count) {
fGpu->handleDirtyContext();
+ if (GrXferBarrierType barrierType = args.fPipeline->xferBarrierType(*fGpu->caps())) {
+ fGpu->xferBarrier(args.fPipeline->getRenderTarget(), barrierType);
+ }
pathRange->willDrawPaths(indices, indexType, count);
this->onDrawPaths(args, pathRange, indices, indexType, transformValues, transformType,
count);
}
+
protected:
GrPathRendering(GrGpu* gpu)
: fGpu(gpu) {
diff --git a/src/gpu/GrPipeline.cpp b/src/gpu/GrPipeline.cpp
index 3761830..28ef7ba 100644
--- a/src/gpu/GrPipeline.cpp
+++ b/src/gpu/GrPipeline.cpp
@@ -150,28 +150,26 @@
////////////////////////////////////////////////////////////////////////////////
-bool GrPipeline::isEqual(const GrPipeline& that, bool ignoreCoordTransforms) const {
- // If we point to the same pipeline, then we are necessarily equal
- if (this == &that) {
- return true;
- }
+bool GrPipeline::AreEqual(const GrPipeline& a, const GrPipeline& b,
+ bool ignoreCoordTransforms) {
+ SkASSERT(&a != &b);
- if (this->getRenderTarget() != that.getRenderTarget() ||
- this->fFragmentStages.count() != that.fFragmentStages.count() ||
- this->fNumColorStages != that.fNumColorStages ||
- this->fScissorState != that.fScissorState ||
- this->fFlags != that.fFlags ||
- this->fStencilSettings != that.fStencilSettings ||
- this->fDrawFace != that.fDrawFace) {
+ if (a.getRenderTarget() != b.getRenderTarget() ||
+ a.fFragmentStages.count() != b.fFragmentStages.count() ||
+ a.fNumColorStages != b.fNumColorStages ||
+ a.fScissorState != b.fScissorState ||
+ a.fFlags != b.fFlags ||
+ a.fStencilSettings != b.fStencilSettings ||
+ a.fDrawFace != b.fDrawFace) {
return false;
}
- if (!this->getXferProcessor()->isEqual(*that.getXferProcessor())) {
+ if (!a.getXferProcessor()->isEqual(*b.getXferProcessor())) {
return false;
}
- for (int i = 0; i < this->numFragmentStages(); i++) {
- if (!this->getFragmentStage(i).processor()->isEqual(*that.getFragmentStage(i).processor(),
+ for (int i = 0; i < a.numFragmentStages(); i++) {
+ if (!a.getFragmentStage(i).processor()->isEqual(*b.getFragmentStage(i).processor(),
ignoreCoordTransforms)) {
return false;
}
diff --git a/src/gpu/GrPipeline.h b/src/gpu/GrPipeline.h
index ef81d21..ea2ea85 100644
--- a/src/gpu/GrPipeline.h
+++ b/src/gpu/GrPipeline.h
@@ -29,6 +29,9 @@
*/
class GrPipeline : public GrNonAtomicRef {
public:
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Creation
+
struct CreateArgs {
const GrPipelineBuilder* fPipelineBuilder;
const GrCaps* fCaps;
@@ -41,13 +44,39 @@
/** Creates a pipeline into a pre-allocated buffer */
static GrPipeline* CreateAt(void* memory, const CreateArgs&, GrPipelineOptimizations*);
- /*
+ /// @}
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// @name Comparisons
+
+ /**
* Returns true if these pipelines are equivalent. Coord transforms may be applied either on
* the GPU or the CPU. When we apply them on the CPU then the matrices need not agree in order
* to combine draws. Therefore we take a param that indicates whether coord transforms should be
* compared."
*/
- bool isEqual(const GrPipeline& that, bool ignoreCoordTransforms = false) const;
+ static bool AreEqual(const GrPipeline& a, const GrPipeline& b, bool ignoreCoordTransforms);
+
+ /**
+ * Allows a GrBatch subclass to determine whether two GrBatches can combine. This is a stricter
+ * test than AreEqual because it also considers xfer barriers when the two batches' bounds
+ * overlap.
+ */
+ static bool CanCombine(const GrPipeline& a, const SkRect& aBounds,
+ const GrPipeline& b, const SkRect& bBounds,
+ const GrCaps& caps,
+ bool ignoreCoordTransforms = false) {
+ if (!AreEqual(a, b, ignoreCoordTransforms)) {
+ return false;
+ }
+ if (a.xferBarrierType(caps)) {
+ return aBounds.fRight <= bBounds.fLeft ||
+ aBounds.fBottom <= bBounds.fTop ||
+ bBounds.fRight <= aBounds.fLeft ||
+ bBounds.fBottom <= aBounds.fTop;
+ }
+ return true;
+ }
/// @}
@@ -90,6 +119,10 @@
bool isHWAntialiasState() const { return SkToBool(fFlags & kHWAA_Flag); }
bool snapVerticesToPixelCenters() const { return SkToBool(fFlags & kSnapVertices_Flag); }
+ GrXferBarrierType xferBarrierType(const GrCaps& caps) const {
+ return fXferProcessor->xferBarrierType(fRenderTarget.get(), caps);
+ }
+
/**
* Gets whether the target is drawing clockwise, counterclockwise,
* or both faces.
diff --git a/src/gpu/GrReorderCommandBuilder.cpp b/src/gpu/GrReorderCommandBuilder.cpp
index 29b4500..2da7f85 100644
--- a/src/gpu/GrReorderCommandBuilder.cpp
+++ b/src/gpu/GrReorderCommandBuilder.cpp
@@ -15,7 +15,8 @@
a.fTop < b.fBottom && b.fTop < a.fBottom;
}
-GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(GrBatch* batch) {
+GrTargetCommands::Cmd* GrReorderCommandBuilder::recordDrawBatch(GrBatch* batch,
+ const GrCaps& caps) {
// Check if there is a Batch Draw we can batch with by linearly searching back until we either
// 1) check every draw
// 2) intersect with something
@@ -58,7 +59,7 @@
break;
}
// We cannot continue to search backwards if the render target changes
- if (previous->batch()->combineIfPossible(batch)) {
+ if (previous->batch()->combineIfPossible(batch, caps)) {
GrBATCH_INFO("\t\tCombining with (%s, B%u)\n",
previous->fBatch->name(), previous->fBatch->uniqueID());
return NULL;
diff --git a/src/gpu/GrReorderCommandBuilder.h b/src/gpu/GrReorderCommandBuilder.h
index a1f1721..ee3b084 100644
--- a/src/gpu/GrReorderCommandBuilder.h
+++ b/src/gpu/GrReorderCommandBuilder.h
@@ -17,7 +17,7 @@
GrReorderCommandBuilder(GrGpu* gpu) : INHERITED(gpu) {}
- Cmd* recordDrawBatch(GrBatch*) override;
+ Cmd* recordDrawBatch(GrBatch*, const GrCaps&) override;
Cmd* recordStencilPath(const GrPipelineBuilder&,
const GrPathProcessor*,
const GrPath*,
diff --git a/src/gpu/GrTargetCommands.cpp b/src/gpu/GrTargetCommands.cpp
index c712ac0..474bbbd 100644
--- a/src/gpu/GrTargetCommands.cpp
+++ b/src/gpu/GrTargetCommands.cpp
@@ -75,7 +75,7 @@
fTransformType, fCount);
}
-void GrTargetCommands::DrawBatch::execute(GrGpu*) {
+void GrTargetCommands::DrawBatch::execute(GrGpu* gpu) {
fBatchTarget->flushNext(fBatch->numberOfDraws());
}
@@ -94,7 +94,3 @@
void GrTargetCommands::CopySurface::execute(GrGpu* gpu) {
gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}
-
-void GrTargetCommands::XferBarrier::execute(GrGpu* gpu) {
- gpu->xferBarrier(fRenderTarget.get(), fBarrierType);
-}
diff --git a/src/gpu/GrTargetCommands.h b/src/gpu/GrTargetCommands.h
index a0e6546..62b2650 100644
--- a/src/gpu/GrTargetCommands.h
+++ b/src/gpu/GrTargetCommands.h
@@ -22,9 +22,7 @@
class GrBufferedDrawTarget;
-// TODO: Convert all commands into GrBatch and remove this class. Xferbarrier will just become a
-// batch blocker (when there is overlap) and the xp is responsible for issuing any barrier calls
-// on the backend.
+// TODO: Convert all commands into GrBatch and remove this class.
class GrTargetCommands : ::SkNoncopyable {
public:
GrTargetCommands(GrGpu* gpu)
@@ -42,7 +40,6 @@
kDrawPath_CmdType = 5,
kDrawPaths_CmdType = 6,
kDrawBatch_CmdType = 7,
- kXferBarrier_CmdType = 8,
};
Cmd(CmdType type)
@@ -83,8 +80,6 @@
typedef GrGpu::DrawArgs DrawArgs;
- void recordXferBarrierIfNecessary(const GrPipeline&, GrBufferedDrawTarget*);
-
// TODO: This can be just a pipeline once paths are in batch, and it should live elsewhere
struct StateForPathDraw : public SkNVRefCnt<StateForPathDraw> {
// TODO get rid of the prim proc parameter when we use batch everywhere
@@ -251,20 +246,6 @@
GrBatchTarget* fBatchTarget;
};
- struct XferBarrier : public Cmd {
- XferBarrier(GrRenderTarget* rt)
- : Cmd(kXferBarrier_CmdType)
- , fRenderTarget(rt) {
- }
-
- void execute(GrGpu*) override;
-
- GrXferBarrierType fBarrierType;
-
- private:
- GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
- };
-
static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
diff --git a/src/gpu/GrTessellatingPathRenderer.cpp b/src/gpu/GrTessellatingPathRenderer.cpp
index 4d23217..4809521 100644
--- a/src/gpu/GrTessellatingPathRenderer.cpp
+++ b/src/gpu/GrTessellatingPathRenderer.cpp
@@ -1576,9 +1576,7 @@
batchTarget->draw(vertices);
}
- bool onCombineIfPossible(GrBatch*) override {
- return false;
- }
+ bool onCombineIfPossible(GrBatch*, const GrCaps&) override { return false; }
private:
TessellatingPathBatch(const GrColor& color,
diff --git a/src/gpu/GrTest.cpp b/src/gpu/GrTest.cpp
index 518ebac..7b6cb5e 100644
--- a/src/gpu/GrTest.cpp
+++ b/src/gpu/GrTest.cpp
@@ -175,11 +175,11 @@
return false;
}
- void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
-
private:
void onResetContext(uint32_t resetBits) override {}
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
+
GrTexture* onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
const void* srcData, size_t rowBytes) override {
return NULL;
diff --git a/src/gpu/GrXferProcessor.cpp b/src/gpu/GrXferProcessor.cpp
index 1ead010..a95445e 100644
--- a/src/gpu/GrXferProcessor.cpp
+++ b/src/gpu/GrXferProcessor.cpp
@@ -93,17 +93,15 @@
this->onGetGLProcessorKey(caps, b);
}
-bool GrXferProcessor::willNeedXferBarrier(const GrRenderTarget* rt,
- const GrCaps& caps,
- GrXferBarrierType* outBarrierType) const {
+GrXferBarrierType GrXferProcessor::xferBarrierType(const GrRenderTarget* rt,
+ const GrCaps& caps) const {
+ SkASSERT(rt);
if (static_cast<const GrSurface*>(rt) == this->getDstTexture()) {
// Texture barriers are required when a shader reads and renders to the same texture.
- SkASSERT(rt);
SkASSERT(caps.textureBarrierSupport());
- *outBarrierType = kTexture_GrXferBarrierType;
- return true;
+ return kTexture_GrXferBarrierType;
}
- return this->onWillNeedXferBarrier(rt, caps, outBarrierType);
+ return this->onXferBarrier(rt, caps);
}
#ifdef SK_DEBUG
diff --git a/src/gpu/batches/GrAAFillRectBatch.cpp b/src/gpu/batches/GrAAFillRectBatch.cpp
index 582d2e8..73a0c4b 100644
--- a/src/gpu/batches/GrAAFillRectBatch.cpp
+++ b/src/gpu/batches/GrAAFillRectBatch.cpp
@@ -130,7 +130,6 @@
args.fDevRect,
canTweakAlphaForCoverage);
}
-
helper.issueDraw(batchTarget);
}
@@ -176,8 +175,9 @@
const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/batches/GrAAFillRectBatch.h b/src/gpu/batches/GrAAFillRectBatch.h
index 6ce8f51..9344695 100644
--- a/src/gpu/batches/GrAAFillRectBatch.h
+++ b/src/gpu/batches/GrAAFillRectBatch.h
@@ -15,12 +15,10 @@
struct SkRect;
namespace GrAAFillRectBatch {
-
GrBatch* Create(GrColor color,
const SkMatrix& viewMatrix,
const SkRect& rect,
const SkRect& devRect);
-
};
#endif
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.cpp b/src/gpu/batches/GrAAStrokeRectBatch.cpp
index fa88fd1..0083aaa 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.cpp
+++ b/src/gpu/batches/GrAAStrokeRectBatch.cpp
@@ -203,8 +203,9 @@
}
}
-bool GrAAStrokeRectBatch::onCombineIfPossible(GrBatch* t) {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+bool GrAAStrokeRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/batches/GrAAStrokeRectBatch.h b/src/gpu/batches/GrAAStrokeRectBatch.h
index b1222c9..7fa15f5 100644
--- a/src/gpu/batches/GrAAStrokeRectBatch.h
+++ b/src/gpu/batches/GrAAStrokeRectBatch.h
@@ -78,7 +78,7 @@
bool miterStroke() const { return fBatch.fMiterStroke; }
bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
- bool onCombineIfPossible(GrBatch* t) override;
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
void generateAAStrokeRectGeometry(void* vertices,
size_t offset,
diff --git a/src/gpu/batches/GrBatch.h b/src/gpu/batches/GrBatch.h
index 8153691..35844cd 100644
--- a/src/gpu/batches/GrBatch.h
+++ b/src/gpu/batches/GrBatch.h
@@ -54,16 +54,14 @@
virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
- bool combineIfPossible(GrBatch* that) {
+ bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
if (this->classID() != that->classID()) {
return false;
}
- return this->onCombineIfPossible(that);
+ return this->onCombineIfPossible(that, caps);
}
- virtual bool onCombineIfPossible(GrBatch*) = 0;
-
virtual void generateGeometry(GrBatchTarget*) = 0;
const SkRect& bounds() const { return fBounds; }
@@ -162,6 +160,8 @@
SkRect fBounds;
private:
+ virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
+
/*
* initBatchTracker is a hook for the some additional overrides / optimization possibilities
* from the GrXferProcessor.
diff --git a/src/gpu/batches/GrDrawAtlasBatch.cpp b/src/gpu/batches/GrDrawAtlasBatch.cpp
index 9bc7753..ba3551d 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.cpp
+++ b/src/gpu/batches/GrDrawAtlasBatch.cpp
@@ -156,8 +156,9 @@
this->setBounds(bounds);
}
-bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t) {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+bool GrDrawAtlasBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/batches/GrDrawAtlasBatch.h b/src/gpu/batches/GrDrawAtlasBatch.h
index bc882b2..e6ce7a0 100644
--- a/src/gpu/batches/GrDrawAtlasBatch.h
+++ b/src/gpu/batches/GrDrawAtlasBatch.h
@@ -56,7 +56,7 @@
int quadCount() const { return fQuadCount; }
bool coverageIgnored() const { return fCoverageIgnored; }
- bool onCombineIfPossible(GrBatch* t) override;
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
SkSTArray<1, Geometry, true> fGeoData;
SkMatrix fViewMatrix;
diff --git a/src/gpu/batches/GrDrawVerticesBatch.cpp b/src/gpu/batches/GrDrawVerticesBatch.cpp
index 397bb2b..f36c54b 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.cpp
+++ b/src/gpu/batches/GrDrawVerticesBatch.cpp
@@ -183,8 +183,9 @@
batchTarget->draw(vertices);
}
-bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t) {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+bool GrDrawVerticesBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/batches/GrDrawVerticesBatch.h b/src/gpu/batches/GrDrawVerticesBatch.h
index a59518a..9446d88 100644
--- a/src/gpu/batches/GrDrawVerticesBatch.h
+++ b/src/gpu/batches/GrDrawVerticesBatch.h
@@ -75,7 +75,7 @@
int indexCount() const { return fBatch.fIndexCount; }
bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
- bool onCombineIfPossible(GrBatch* t) override;
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
struct BatchTracker {
GrPrimitiveType fPrimitiveType;
diff --git a/src/gpu/batches/GrRectBatch.cpp b/src/gpu/batches/GrRectBatch.cpp
index 1cfa093..1d5b9f3 100644
--- a/src/gpu/batches/GrRectBatch.cpp
+++ b/src/gpu/batches/GrRectBatch.cpp
@@ -80,8 +80,9 @@
helper.issueDraw(batchTarget);
}
-bool GrRectBatch::onCombineIfPossible(GrBatch* t) {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+bool GrRectBatch::onCombineIfPossible(GrBatch* t, const GrCaps& caps) {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/batches/GrRectBatch.h b/src/gpu/batches/GrRectBatch.h
index aa227d4..3f13ef5 100644
--- a/src/gpu/batches/GrRectBatch.h
+++ b/src/gpu/batches/GrRectBatch.h
@@ -66,7 +66,7 @@
bool hasLocalMatrix() const { return fGeoData[0].fHasLocalMatrix; }
bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
- bool onCombineIfPossible(GrBatch* t) override;
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override;
const GrGeometryProcessor* createRectGP();
diff --git a/src/gpu/batches/GrStrokeRectBatch.h b/src/gpu/batches/GrStrokeRectBatch.h
index 3a20d9b..7979a64 100644
--- a/src/gpu/batches/GrStrokeRectBatch.h
+++ b/src/gpu/batches/GrStrokeRectBatch.h
@@ -50,10 +50,11 @@
bool hairline() const { return fBatch.fHairline; }
bool coverageIgnored() const { return fBatch.fCoverageIgnored; }
- bool onCombineIfPossible(GrBatch* t) override {
- //if (!this->pipeline()->isEqual(*t->pipeline())) {
- // return false;
- //}
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
+ // if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(),
+ // t->bounds(), caps)) {
+ // return false;
+ // }
// GrStrokeRectBatch* that = t->cast<StrokeRectBatch>();
// NonAA stroke rects cannot batch right now
diff --git a/src/gpu/batches/GrTestBatch.h b/src/gpu/batches/GrTestBatch.h
index 55cc5c2..68ea676 100644
--- a/src/gpu/batches/GrTestBatch.h
+++ b/src/gpu/batches/GrTestBatch.h
@@ -66,7 +66,7 @@
virtual Geometry* geoData(int index) = 0;
virtual const Geometry* geoData(int index) const = 0;
- bool onCombineIfPossible(GrBatch* t) override {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
return false;
}
diff --git a/src/gpu/effects/GrCustomXfermode.cpp b/src/gpu/effects/GrCustomXfermode.cpp
index 08a1e46..e7023ca 100644
--- a/src/gpu/effects/GrCustomXfermode.cpp
+++ b/src/gpu/effects/GrCustomXfermode.cpp
@@ -552,9 +552,7 @@
void onGetGLProcessorKey(const GrGLSLCaps& caps, GrProcessorKeyBuilder* b) const override;
- bool onWillNeedXferBarrier(const GrRenderTarget* rt,
- const GrCaps& caps,
- GrXferBarrierType* outBarrierType) const override;
+ GrXferBarrierType onXferBarrier(const GrRenderTarget*, const GrCaps&) const override;
void onGetBlendInfo(BlendInfo*) const override;
@@ -757,14 +755,11 @@
return flags;
}
-bool CustomXP::onWillNeedXferBarrier(const GrRenderTarget* rt,
- const GrCaps& caps,
- GrXferBarrierType* outBarrierType) const {
+GrXferBarrierType CustomXP::onXferBarrier(const GrRenderTarget* rt, const GrCaps& caps) const {
if (this->hasHWBlendEquation() && !caps.advancedCoherentBlendEquationSupport()) {
- *outBarrierType = kBlend_GrXferBarrierType;
- return true;
+ return kBlend_GrXferBarrierType;
}
- return false;
+ return kNone_GrXferBarrierType;
}
void CustomXP::onGetBlendInfo(BlendInfo* blendInfo) const {
diff --git a/src/gpu/effects/GrDashingEffect.cpp b/src/gpu/effects/GrDashingEffect.cpp
index 71f4e8d..6a894f9 100644
--- a/src/gpu/effects/GrDashingEffect.cpp
+++ b/src/gpu/effects/GrDashingEffect.cpp
@@ -617,8 +617,9 @@
combinedMatrix.mapRect(&fBounds);
}
- bool onCombineIfPossible(GrBatch* t) override {
- if (!this->pipeline()->isEqual(*t->pipeline())) {
+ bool onCombineIfPossible(GrBatch* t, const GrCaps& caps) override {
+ if (!GrPipeline::CanCombine(*this->pipeline(), this->bounds(), *t->pipeline(), t->bounds(),
+ caps)) {
return false;
}
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index ec95557..0945ed5 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -3062,6 +3062,7 @@
}
void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
+ SkASSERT(type);
switch (type) {
case kTexture_GrXferBarrierType: {
GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
@@ -3080,6 +3081,7 @@
this->caps()->blendEquationSupport());
GL_CALL(BlendBarrier());
return;
+ default: break; // placate compiler warnings that kNone not handled
}
}
diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h
index d247a68..5d806a7 100644
--- a/src/gpu/gl/GrGLGpu.h
+++ b/src/gpu/gl/GrGLGpu.h
@@ -96,8 +96,6 @@
const SkIRect& srcRect,
const SkIPoint& dstPoint) override;
- void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
-
void buildProgramDesc(GrProgramDesc*,
const GrPrimitiveProcessor&,
const GrPipeline&,
@@ -118,6 +116,8 @@
// GrGpu overrides
void onResetContext(uint32_t resetBits) override;
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;
+
GrTexture* onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
const void* srcData, size_t rowBytes) override;
GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,