GrBatchPrototype
BUG=skia:
Review URL: https://codereview.chromium.org/845103005
diff --git a/gm/dstreadshuffle.cpp b/gm/dstreadshuffle.cpp
new file mode 100644
index 0000000..b98b10e
--- /dev/null
+++ b/gm/dstreadshuffle.cpp
@@ -0,0 +1,185 @@
+
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "gm.h"
+#include "SkBitmap.h"
+#include "SkRandom.h"
+#include "SkShader.h"
+#include "SkXfermode.h"
+
+namespace skiagm {
+
+/**
+ * Renders overlapping shapes with hard light and src-over xfermodes to exercise dst reads.
+ */
+class DstReadShuffle : public GM {
+public:
+ DstReadShuffle() {
+ this->setBGColor(SkColorSetARGB(0xff, 0xff, 0, 0xff));
+ }
+
+protected:
+ enum ShapeType {
+ kCircle_ShapeType,
+ kRoundRect_ShapeType,
+ kRect_ShapeType,
+ kConvexPath_ShapeType,
+ kConcavePath_ShapeType,
+ kText_ShapeType,
+ kNumShapeTypes
+ };
+
+ SkString onShortName() SK_OVERRIDE {
+ return SkString("dstreadshuffle");
+ }
+
+ SkISize onISize() SK_OVERRIDE {
+ return SkISize::Make(kWidth, kHeight);
+ }
+
+ void drawShape(SkCanvas* canvas,
+ SkPaint* paint,
+ ShapeType type) {
+ static const SkRect kRect = SkRect::MakeXYWH(SkIntToScalar(-50), SkIntToScalar(-50),
+ SkIntToScalar(75), SkIntToScalar(105));
+ switch (type) {
+ case kCircle_ShapeType:
+ canvas->drawCircle(0, 0, 50, *paint);
+ break;
+ case kRoundRect_ShapeType:
+ canvas->drawRoundRect(kRect, SkIntToScalar(10), SkIntToScalar(20), *paint);
+ break;
+ case kRect_ShapeType:
+ canvas->drawRect(kRect, *paint);
+ break;
+ case kConvexPath_ShapeType:
+ if (fConvexPath.isEmpty()) {
+ SkPoint points[4];
+ kRect.toQuad(points);
+ fConvexPath.moveTo(points[0]);
+ fConvexPath.quadTo(points[1], points[2]);
+ fConvexPath.quadTo(points[3], points[0]);
+ SkASSERT(fConvexPath.isConvex());
+ }
+ canvas->drawPath(fConvexPath, *paint);
+ break;
+ case kConcavePath_ShapeType:
+ if (fConcavePath.isEmpty()) {
+ SkPoint points[5] = {{0, SkIntToScalar(-50)} };
+ SkMatrix rot;
+ rot.setRotate(SkIntToScalar(360) / 5);
+ for (int i = 1; i < 5; ++i) {
+ rot.mapPoints(points + i, points + i - 1, 1);
+ }
+ fConcavePath.moveTo(points[0]);
+ for (int i = 0; i < 5; ++i) {
+ fConcavePath.lineTo(points[(2 * i) % 5]);
+ }
+ fConcavePath.setFillType(SkPath::kEvenOdd_FillType);
+ SkASSERT(!fConcavePath.isConvex());
+ }
+ canvas->drawPath(fConcavePath, *paint);
+ break;
+ case kText_ShapeType: {
+ const char* text = "Hello!";
+ paint->setTextSize(30);
+ canvas->drawText(text, strlen(text), 0, 0, *paint);
+ }
+ default:
+ break;
+ }
+ }
+
+ static SkColor GetColor(SkRandom* random, int i) {
+ SkColor color;
+ switch (i) {
+ case 0:
+ color = SK_ColorTRANSPARENT;
+ break;
+ case 1:
+ color = SkColorSetARGB(0xff,
+ random->nextULessThan(256),
+ random->nextULessThan(256),
+ random->nextULessThan(256));
+ break;
+ default:
+ uint8_t alpha = random->nextULessThan(256);
+ color = SkColorSetARGB(alpha,
+ random->nextRangeU(0, alpha),
+ random->nextRangeU(0, alpha),
+ random->nextRangeU(0, alpha));
+ break;
+ }
+ return color;
+ }
+
+ static void SetStyle(SkPaint* p, int style, int width) {
+ switch (style) {
+ case 0:
+ p->setStyle(SkPaint::kStroke_Style);
+ p->setStrokeWidth((SkScalar)width);
+ break;
+ case 1:
+ p->setStyle(SkPaint::kStrokeAndFill_Style);
+ p->setStrokeWidth((SkScalar)width);
+ break;
+ default:
+ p->setStyle(SkPaint::kFill_Style);
+ break;
+ }
+ }
+
+ void onDraw(SkCanvas* canvas) SK_OVERRIDE {
+ SkRandom random;
+ SkScalar y = 100;
+ for (int i = 0; i < kNumShapeTypes; i++) {
+ ShapeType shapeType = static_cast<ShapeType>(i);
+ SkScalar x = 25;
+ for (int style = 0; style < 3; style++) {
+ for (int width = 0; width <= 1; width++) {
+ for (int alpha = 0; alpha <= 2; alpha++) {
+ for (int r = 0; r <= 5; r++) {
+ SkColor color = GetColor(&random, alpha);
+
+ SkPaint p;
+ p.setAntiAlias(true);
+ p.setColor(color);
+ p.setXfermodeMode(r % 3 == 0 ? SkXfermode::kHardLight_Mode :
+ SkXfermode::kSrcOver_Mode);
+ SetStyle(&p, style, width);
+ canvas->save();
+ canvas->translate(x, y);
+ canvas->rotate((SkScalar)(r < 3 ? 10 : 0));
+ this->drawShape(canvas, &p, shapeType);
+ canvas->restore();
+ x += 8;
+ }
+ }
+ }
+ }
+ y += 50;
+ }
+ }
+
+private:
+ enum {
+ kNumShapes = 100,
+ };
+ SkAutoTUnref<SkShader> fBG;
+ SkPath fConcavePath;
+ SkPath fConvexPath;
+ static const int kWidth = 900;
+ static const int kHeight = 400;
+ typedef GM INHERITED;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+static GM* MyFactory(void*) { return new DstReadShuffle; }
+static GMRegistry reg(MyFactory);
+
+}
diff --git a/gyp/gmslides.gypi b/gyp/gmslides.gypi
index abcb867..9597ed6 100644
--- a/gyp/gmslides.gypi
+++ b/gyp/gmslides.gypi
@@ -77,6 +77,7 @@
'../gm/drawlooper.cpp',
'../gm/dropshadowimagefilter.cpp',
'../gm/drrect.cpp',
+ '../gm/dstreadshuffle.cpp',
'../gm/etc1bitmap.cpp',
'../gm/extractbitmap.cpp',
'../gm/emboss.cpp',
diff --git a/gyp/gpu.gypi b/gyp/gpu.gypi
index 4110c7e..1fbbec8 100644
--- a/gyp/gpu.gypi
+++ b/gyp/gpu.gypi
@@ -55,6 +55,10 @@
'<(skia_src_path)/gpu/GrAllocator.h',
'<(skia_src_path)/gpu/GrAtlas.cpp',
'<(skia_src_path)/gpu/GrAtlas.h',
+ '<(skia_src_path)/gpu/GrBatch.cpp',
+ '<(skia_src_path)/gpu/GrBatch.h',
+ '<(skia_src_path)/gpu/GrBatchTarget.cpp',
+ '<(skia_src_path)/gpu/GrBatchTarget.h',
'<(skia_src_path)/gpu/GrBitmapTextContext.cpp',
'<(skia_src_path)/gpu/GrBitmapTextContext.h',
'<(skia_src_path)/gpu/GrBlend.cpp',
@@ -82,7 +86,6 @@
'<(skia_src_path)/gpu/GrFontScaler.cpp',
'<(skia_src_path)/gpu/GrFontScaler.h',
'<(skia_src_path)/gpu/GrGeometryBuffer.h',
- '<(skia_src_path)/gpu/GrGeometryData.h',
'<(skia_src_path)/gpu/GrGeometryProcessor.h',
'<(skia_src_path)/gpu/GrGeometryProcessor.cpp',
'<(skia_src_path)/gpu/GrGlyph.h',
diff --git a/src/gpu/GrAAConvexPathRenderer.cpp b/src/gpu/GrAAConvexPathRenderer.cpp
index 031dac3..b301076 100644
--- a/src/gpu/GrAAConvexPathRenderer.cpp
+++ b/src/gpu/GrAAConvexPathRenderer.cpp
@@ -632,7 +632,7 @@
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
diff --git a/src/gpu/GrAARectRenderer.cpp b/src/gpu/GrAARectRenderer.cpp
index 0ffdd4d..6866b79 100644
--- a/src/gpu/GrAARectRenderer.cpp
+++ b/src/gpu/GrAARectRenderer.cpp
@@ -6,6 +6,9 @@
*/
#include "GrAARectRenderer.h"
+#include "GrBatch.h"
+#include "GrBatchTarget.h"
+#include "GrBufferAllocPool.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrGeometryProcessor.h"
#include "GrGpu.h"
@@ -17,46 +20,12 @@
///////////////////////////////////////////////////////////////////////////////
-namespace {
-// Should the coverage be multiplied into the color attrib or use a separate attrib.
-enum CoverageAttribType {
- kUseColor_CoverageAttribType,
- kUseCoverage_CoverageAttribType,
-};
-}
-
-static const GrGeometryProcessor* create_rect_gp(const GrPipelineBuilder& pipelineBuilder,
- GrColor color,
- CoverageAttribType* type,
- const SkMatrix& localMatrix) {
- uint32_t flags = GrDefaultGeoProcFactory::kColor_GPType;
- const GrGeometryProcessor* gp;
- if (pipelineBuilder.canTweakAlphaForCoverage()) {
- gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix);
- SkASSERT(gp->getVertexStride() == sizeof(GrDefaultGeoProcFactory::PositionColorAttr));
- *type = kUseColor_CoverageAttribType;
- } else {
- flags |= GrDefaultGeoProcFactory::kCoverage_GPType;
- gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix,
- GrColorIsOpaque(color));
- SkASSERT(gp->getVertexStride()==sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
- *type = kUseCoverage_CoverageAttribType;
- }
- return gp;
-}
-
static void set_inset_fan(SkPoint* pts, size_t stride,
const SkRect& r, SkScalar dx, SkScalar dy) {
pts->setRectFan(r.fLeft + dx, r.fTop + dy,
r.fRight - dx, r.fBottom - dy, stride);
}
-void GrAARectRenderer::reset() {
- SkSafeSetNull(fAAFillRectIndexBuffer);
- SkSafeSetNull(fAAMiterStrokeRectIndexBuffer);
- SkSafeSetNull(fAABevelStrokeRectIndexBuffer);
-}
-
static const uint16_t gFillAARectIdx[] = {
0, 1, 5, 5, 4, 0,
1, 2, 6, 6, 5, 1,
@@ -69,6 +38,314 @@
static const int kVertsPerAAFillRect = 8;
static const int kNumAAFillRectsInIndexBuffer = 256;
+static const GrGeometryProcessor* create_fill_rect_gp(bool tweakAlphaForCoverage,
+ const SkMatrix& localMatrix) {
+ uint32_t flags = GrDefaultGeoProcFactory::kColor_GPType;
+ const GrGeometryProcessor* gp;
+ if (tweakAlphaForCoverage) {
+ gp = GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE, SkMatrix::I(), localMatrix,
+ false, 0xff);
+ } else {
+ flags |= GrDefaultGeoProcFactory::kCoverage_GPType;
+ gp = GrDefaultGeoProcFactory::Create(flags, GrColor_WHITE, SkMatrix::I(), localMatrix,
+ false, 0xff);
+ }
+ return gp;
+}
+
+class AAFillRectBatch : public GrBatch {
+public:
+ struct Geometry {
+ GrColor fColor;
+ SkMatrix fViewMatrix;
+ SkRect fRect;
+ SkRect fDevRect;
+ };
+
+ static GrBatch* Create(const Geometry& geometry, const GrIndexBuffer* indexBuffer) {
+ return SkNEW_ARGS(AAFillRectBatch, (geometry, indexBuffer));
+ }
+
+ const char* name() const SK_OVERRIDE { return "AAFillRectBatch"; }
+
+ void getInvariantOutputColor(GrInitInvariantOutput* out) const SK_OVERRIDE {
+ // When this is called on a batch, there is only one geometry bundle
+ if (!this->canTweakAlphaForCoverage() && GrColorIsOpaque(fGeoData[0].fColor)) {
+ out->setUnknownOpaqueFourComponents();
+ } else {
+ out->setUnknownFourComponents();
+ }
+ }
+
+ void getInvariantOutputCoverage(GrInitInvariantOutput* out) const SK_OVERRIDE {
+ if (this->canTweakAlphaForCoverage()) {
+ // uniform coverage
+ out->setKnownSingleComponent(0xff);
+ } else {
+ out->setUnknownSingleComponent();
+ }
+ }
+
+ void initBatchOpt(const GrBatchOpt& batchOpt) {
+ fBatchOpt = batchOpt;
+ }
+
+ void initBatchTracker(const GrPipelineInfo& init) SK_OVERRIDE {
+ // Handle any color overrides
+ if (init.fColorIgnored) {
+ fGeoData[0].fColor = GrColor_ILLEGAL;
+ } else if (GrColor_ILLEGAL != init.fOverrideColor) {
+ fGeoData[0].fColor = init.fOverrideColor;
+ }
+
+ // setup batch properties
+ fBatch.fColorIgnored = init.fColorIgnored;
+ fBatch.fColor = fGeoData[0].fColor;
+ fBatch.fUsesLocalCoords = init.fUsesLocalCoords;
+ fBatch.fCoverageIgnored = init.fCoverageIgnored;
+ }
+
+ void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) SK_OVERRIDE {
+ bool canTweakAlphaForCoverage = this->canTweakAlphaForCoverage();
+
+ SkMatrix localMatrix;
+ if (!this->viewMatrix().invert(&localMatrix)) {
+ SkDebugf("Cannot invert\n");
+ return;
+ }
+
+ const GrGeometryProcessor* gp = create_fill_rect_gp(canTweakAlphaForCoverage,
+ localMatrix);
+
+ batchTarget->initDraw(gp, pipeline);
+ gp->unref();
+
+ // TODO this is hacky, but the only way we have to initialize the GP is to use the
+ // GrPipelineInfo struct so we can generate the correct shader. Once we have GrBatch
+ // everywhere we can remove this nastiness
+ GrPipelineInfo init;
+ init.fColorIgnored = fBatch.fColorIgnored;
+ init.fOverrideColor = GrColor_ILLEGAL;
+ init.fCoverageIgnored = fBatch.fCoverageIgnored;
+ init.fUsesLocalCoords = this->usesLocalCoords();
+ gp->initBatchTracker(batchTarget->currentBatchTracker(), init);
+
+ size_t vertexStride = gp->getVertexStride();
+
+ SkASSERT(canTweakAlphaForCoverage ?
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorAttr) :
+ vertexStride == sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
+
+ int instanceCount = fGeoData.count();
+ int vertexCount = kVertsPerAAFillRect * instanceCount;
+
+ const GrVertexBuffer* vertexBuffer;
+ int firstVertex;
+
+ void *vertices = batchTarget->vertexPool()->makeSpace(vertexStride,
+ vertexCount,
+ &vertexBuffer,
+ &firstVertex);
+
+ for (int i = 0; i < instanceCount; i++) {
+ const Geometry& args = fGeoData[i];
+ this->generateAAFillRectGeometry(vertices,
+ i * kVertsPerAAFillRect * vertexStride,
+ vertexStride,
+ args.fColor,
+ args.fViewMatrix,
+ args.fRect,
+ args.fDevRect,
+ canTweakAlphaForCoverage);
+ }
+
+ GrDrawTarget::DrawInfo drawInfo;
+ drawInfo.setPrimitiveType(kTriangles_GrPrimitiveType);
+ drawInfo.setStartVertex(0);
+ drawInfo.setStartIndex(0);
+ drawInfo.setVerticesPerInstance(kVertsPerAAFillRect);
+ drawInfo.setIndicesPerInstance(kIndicesPerAAFillRect);
+ drawInfo.adjustStartVertex(firstVertex);
+ drawInfo.setVertexBuffer(vertexBuffer);
+ drawInfo.setIndexBuffer(fIndexBuffer);
+
+ int maxInstancesPerDraw = kNumAAFillRectsInIndexBuffer;
+
+ while (instanceCount) {
+ drawInfo.setInstanceCount(SkTMin(instanceCount, maxInstancesPerDraw));
+ drawInfo.setVertexCount(drawInfo.instanceCount() * drawInfo.verticesPerInstance());
+ drawInfo.setIndexCount(drawInfo.instanceCount() * drawInfo.indicesPerInstance());
+
+ batchTarget->draw(drawInfo);
+
+ drawInfo.setStartVertex(drawInfo.startVertex() + drawInfo.vertexCount());
+ instanceCount -= drawInfo.instanceCount();
+ }
+ }
+
+ SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
+
+private:
+ AAFillRectBatch(const Geometry& geometry, const GrIndexBuffer* indexBuffer)
+ : fIndexBuffer(indexBuffer) {
+ this->initClassID<AAFillRectBatch>();
+ fGeoData.push_back(geometry);
+ }
+
+ GrColor color() const { return fBatch.fColor; }
+ bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
+ bool canTweakAlphaForCoverage() const { return fBatchOpt.fCanTweakAlphaForCoverage; }
+ bool colorIgnored() const { return fBatch.fColorIgnored; }
+ const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
+
+ bool onCombineIfPossible(GrBatch* t) SK_OVERRIDE {
+ AAFillRectBatch* that = t->cast<AAFillRectBatch>();
+ if (this->canTweakAlphaForCoverage() != that->canTweakAlphaForCoverage()) {
+ return false;
+ }
+
+ if (this->colorIgnored() != that->colorIgnored()) {
+ return false;
+ }
+
+ if (this->usesLocalCoords() != that->usesLocalCoords()) {
+ return false;
+ }
+
+        // We apply the view matrix to the rect points on the CPU. However, if the pipeline uses
+        // local coords then we won't be able to batch. We could actually upload the view matrix
+        // using vertex attributes in these cases, but we haven't investigated that.
+ if (this->usesLocalCoords() && !this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
+ return false;
+ }
+
+ if (this->color() != that->color()) {
+ fBatch.fColor = GrColor_ILLEGAL;
+ }
+ fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
+ return true;
+ }
+
+ void generateAAFillRectGeometry(void* vertices,
+ uint32_t offset,
+ uint32_t vertexStride,
+ GrColor color,
+ const SkMatrix& viewMatrix,
+ const SkRect& rect,
+ const SkRect& devRect,
+ bool tweakAlphaForCoverage) const {
+ intptr_t verts = reinterpret_cast<intptr_t>(vertices) + offset;
+
+ SkPoint* fan0Pos = reinterpret_cast<SkPoint*>(verts);
+ SkPoint* fan1Pos = reinterpret_cast<SkPoint*>(verts + 4 * vertexStride);
+
+ SkScalar inset = SkMinScalar(devRect.width(), SK_Scalar1);
+ inset = SK_ScalarHalf * SkMinScalar(inset, devRect.height());
+
+ if (viewMatrix.rectStaysRect()) {
+ set_inset_fan(fan0Pos, vertexStride, devRect, -SK_ScalarHalf, -SK_ScalarHalf);
+ set_inset_fan(fan1Pos, vertexStride, devRect, inset, inset);
+ } else {
+ // compute transformed (1, 0) and (0, 1) vectors
+ SkVector vec[2] = {
+ { viewMatrix[SkMatrix::kMScaleX], viewMatrix[SkMatrix::kMSkewY] },
+ { viewMatrix[SkMatrix::kMSkewX], viewMatrix[SkMatrix::kMScaleY] }
+ };
+
+ vec[0].normalize();
+ vec[0].scale(SK_ScalarHalf);
+ vec[1].normalize();
+ vec[1].scale(SK_ScalarHalf);
+
+ // create the rotated rect
+ fan0Pos->setRectFan(rect.fLeft, rect.fTop,
+ rect.fRight, rect.fBottom, vertexStride);
+ viewMatrix.mapPointsWithStride(fan0Pos, vertexStride, 4);
+
+ // Now create the inset points and then outset the original
+ // rotated points
+
+ // TL
+ *((SkPoint*)((intptr_t)fan1Pos + 0 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) + vec[0] + vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) -= vec[0] + vec[1];
+ // BL
+ *((SkPoint*)((intptr_t)fan1Pos + 1 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) + vec[0] - vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) -= vec[0] - vec[1];
+ // BR
+ *((SkPoint*)((intptr_t)fan1Pos + 2 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) - vec[0] - vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) += vec[0] + vec[1];
+ // TR
+ *((SkPoint*)((intptr_t)fan1Pos + 3 * vertexStride)) =
+ *((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) - vec[0] + vec[1];
+ *((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) += vec[0] - vec[1];
+ }
+
+ // Make verts point to vertex color and then set all the color and coverage vertex attrs
+ // values.
+ verts += sizeof(SkPoint);
+ for (int i = 0; i < 4; ++i) {
+ if (tweakAlphaForCoverage) {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = 0;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = 0;
+ }
+ }
+
+ int scale;
+ if (inset < SK_ScalarHalf) {
+ scale = SkScalarFloorToInt(512.0f * inset / (inset + SK_ScalarHalf));
+ SkASSERT(scale >= 0 && scale <= 255);
+ } else {
+ scale = 0xff;
+ }
+
+ verts += 4 * vertexStride;
+
+ float innerCoverage = GrNormalizeByteToFloat(scale);
+ GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
+
+ for (int i = 0; i < 4; ++i) {
+ if (tweakAlphaForCoverage) {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
+ } else {
+ *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
+ *reinterpret_cast<float*>(verts + i * vertexStride +
+ sizeof(GrColor)) = innerCoverage;
+ }
+ }
+ }
+
+ struct BatchTracker {
+ GrColor fColor;
+ bool fUsesLocalCoords;
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ };
+
+ GrBatchOpt fBatchOpt;
+ BatchTracker fBatch;
+ const GrIndexBuffer* fIndexBuffer;
+ SkSTArray<1, Geometry, true> fGeoData;
+};
+
+namespace {
+// Should the coverage be multiplied into the color attrib or use a separate attrib.
+enum CoverageAttribType {
+ kUseColor_CoverageAttribType,
+ kUseCoverage_CoverageAttribType,
+};
+}
+
+void GrAARectRenderer::reset() {
+ SkSafeSetNull(fAAFillRectIndexBuffer);
+ SkSafeSetNull(fAAMiterStrokeRectIndexBuffer);
+ SkSafeSetNull(fAABevelStrokeRectIndexBuffer);
+}
+
static const uint16_t gMiterStrokeAARectIdx[] = {
0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0,
1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0,
@@ -184,135 +461,21 @@
const SkMatrix& viewMatrix,
const SkRect& rect,
const SkRect& devRect) {
- GrPipelineBuilder::AutoRestoreEffects are(pipelineBuilder);
-
- SkMatrix localMatrix;
- if (!viewMatrix.invert(&localMatrix)) {
- SkDebugf("Cannot invert\n");
- return;
- }
-
- CoverageAttribType type;
- SkAutoTUnref<const GrGeometryProcessor> gp(create_rect_gp(*pipelineBuilder, color, &type,
- localMatrix));
-
- size_t vertexStride = gp->getVertexStride();
- GrDrawTarget::AutoReleaseGeometry geo(target, 8, vertexStride, 0);
- if (!geo.succeeded()) {
- SkDebugf("Failed to get space for vertices!\n");
- return;
- }
-
if (NULL == fAAFillRectIndexBuffer) {
fAAFillRectIndexBuffer = fGpu->createInstancedIndexBuffer(gFillAARectIdx,
kIndicesPerAAFillRect,
kNumAAFillRectsInIndexBuffer,
kVertsPerAAFillRect);
}
- GrIndexBuffer* indexBuffer = fAAFillRectIndexBuffer;
- if (NULL == indexBuffer) {
- SkDebugf("Failed to create index buffer!\n");
- return;
- }
- intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());
+ AAFillRectBatch::Geometry geometry;
+ geometry.fRect = rect;
+ geometry.fViewMatrix = viewMatrix;
+ geometry.fDevRect = devRect;
+ geometry.fColor = color;
- SkPoint* fan0Pos = reinterpret_cast<SkPoint*>(verts);
- SkPoint* fan1Pos = reinterpret_cast<SkPoint*>(verts + 4 * vertexStride);
-
- SkScalar inset = SkMinScalar(devRect.width(), SK_Scalar1);
- inset = SK_ScalarHalf * SkMinScalar(inset, devRect.height());
-
- if (viewMatrix.rectStaysRect()) {
- // Temporarily #if'ed out. We don't want to pass in the devRect but
- // right now it is computed in GrContext::apply_aa_to_rect and we don't
- // want to throw away the work
-#if 0
- SkRect devRect;
- combinedMatrix.mapRect(&devRect, rect);
-#endif
-
- set_inset_fan(fan0Pos, vertexStride, devRect, -SK_ScalarHalf, -SK_ScalarHalf);
- set_inset_fan(fan1Pos, vertexStride, devRect, inset, inset);
- } else {
- // compute transformed (1, 0) and (0, 1) vectors
- SkVector vec[2] = {
- { viewMatrix[SkMatrix::kMScaleX], viewMatrix[SkMatrix::kMSkewY] },
- { viewMatrix[SkMatrix::kMSkewX], viewMatrix[SkMatrix::kMScaleY] }
- };
-
- vec[0].normalize();
- vec[0].scale(SK_ScalarHalf);
- vec[1].normalize();
- vec[1].scale(SK_ScalarHalf);
-
- // create the rotated rect
- fan0Pos->setRectFan(rect.fLeft, rect.fTop,
- rect.fRight, rect.fBottom, vertexStride);
- viewMatrix.mapPointsWithStride(fan0Pos, vertexStride, 4);
-
- // Now create the inset points and then outset the original
- // rotated points
-
- // TL
- *((SkPoint*)((intptr_t)fan1Pos + 0 * vertexStride)) =
- *((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) + vec[0] + vec[1];
- *((SkPoint*)((intptr_t)fan0Pos + 0 * vertexStride)) -= vec[0] + vec[1];
- // BL
- *((SkPoint*)((intptr_t)fan1Pos + 1 * vertexStride)) =
- *((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) + vec[0] - vec[1];
- *((SkPoint*)((intptr_t)fan0Pos + 1 * vertexStride)) -= vec[0] - vec[1];
- // BR
- *((SkPoint*)((intptr_t)fan1Pos + 2 * vertexStride)) =
- *((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) - vec[0] - vec[1];
- *((SkPoint*)((intptr_t)fan0Pos + 2 * vertexStride)) += vec[0] + vec[1];
- // TR
- *((SkPoint*)((intptr_t)fan1Pos + 3 * vertexStride)) =
- *((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) - vec[0] + vec[1];
- *((SkPoint*)((intptr_t)fan0Pos + 3 * vertexStride)) += vec[0] - vec[1];
- }
-
- // Make verts point to vertex color and then set all the color and coverage vertex attrs values.
- verts += sizeof(SkPoint);
- for (int i = 0; i < 4; ++i) {
- if (kUseCoverage_CoverageAttribType == type) {
- *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
- *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = 0;
- } else {
- *reinterpret_cast<GrColor*>(verts + i * vertexStride) = 0;
- }
- }
-
- int scale;
- if (inset < SK_ScalarHalf) {
- scale = SkScalarFloorToInt(512.0f * inset / (inset + SK_ScalarHalf));
- SkASSERT(scale >= 0 && scale <= 255);
- } else {
- scale = 0xff;
- }
-
- verts += 4 * vertexStride;
-
- float innerCoverage = GrNormalizeByteToFloat(scale);
- GrColor scaledColor = (0xff == scale) ? color : SkAlphaMulQ(color, scale);
-
- for (int i = 0; i < 4; ++i) {
- if (kUseCoverage_CoverageAttribType == type) {
- *reinterpret_cast<GrColor*>(verts + i * vertexStride) = color;
- *reinterpret_cast<float*>(verts + i * vertexStride + sizeof(GrColor)) = innerCoverage;
- } else {
- *reinterpret_cast<GrColor*>(verts + i * vertexStride) = scaledColor;
- }
- }
-
- target->setIndexSourceToBuffer(indexBuffer);
- target->drawIndexedInstances(pipelineBuilder,
- gp,
- kTriangles_GrPrimitiveType,
- 1,
- kVertsPerAAFillRect,
- kIndicesPerAAFillRect);
- target->resetIndexSource();
+ SkAutoTUnref<GrBatch> batch(AAFillRectBatch::Create(geometry, fAAFillRectIndexBuffer));
+ target->drawBatch(pipelineBuilder, batch, &devRect);
}
void GrAARectRenderer::strokeAARect(GrDrawTarget* target,
@@ -382,10 +545,31 @@
devOutsideAssist.outset(0, ry);
}
- this->geometryStrokeAARect(target, pipelineBuilder, color, viewMatrix, devOutside, devOutsideAssist,
- devInside, miterStroke);
+ this->geometryStrokeAARect(target, pipelineBuilder, color, viewMatrix, devOutside,
+ devOutsideAssist, devInside, miterStroke);
}
+static const GrGeometryProcessor* create_rect_gp(const GrPipelineBuilder& pipelineBuilder,
+ GrColor color,
+ CoverageAttribType* type,
+ const SkMatrix& localMatrix) {
+ uint32_t flags = GrDefaultGeoProcFactory::kColor_GPType;
+ const GrGeometryProcessor* gp;
+    if (pipelineBuilder.canTweakAlphaForCoverage()) {
+ gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix);
+ SkASSERT(gp->getVertexStride() == sizeof(GrDefaultGeoProcFactory::PositionColorAttr));
+ *type = kUseColor_CoverageAttribType;
+ } else {
+ flags |= GrDefaultGeoProcFactory::kCoverage_GPType;
+ gp = GrDefaultGeoProcFactory::Create(flags, color, SkMatrix::I(), localMatrix,
+ GrColorIsOpaque(color));
+ SkASSERT(gp->getVertexStride()==sizeof(GrDefaultGeoProcFactory::PositionColorCoverageAttr));
+ *type = kUseCoverage_CoverageAttribType;
+ }
+ return gp;
+}
+
void GrAARectRenderer::geometryStrokeAARect(GrDrawTarget* target,
GrPipelineBuilder* pipelineBuilder,
GrColor color,
@@ -394,8 +578,6 @@
const SkRect& devOutsideAssist,
const SkRect& devInside,
bool miterStroke) {
- GrPipelineBuilder::AutoRestoreEffects are(pipelineBuilder);
-
SkMatrix localMatrix;
if (!viewMatrix.invert(&localMatrix)) {
SkDebugf("Cannot invert\n");
diff --git a/src/gpu/GrBatch.cpp b/src/gpu/GrBatch.cpp
new file mode 100644
index 0000000..e1650a6
--- /dev/null
+++ b/src/gpu/GrBatch.cpp
@@ -0,0 +1,35 @@
+#include "GrBatch.h"
+
+#include "GrMemoryPool.h"
+#include "SkTLS.h"
+
+// TODO I noticed a small benefit to using a larger exclusive pool for batches. It's very small,
+// but seems to be mostly consistent. There is a lot in flux right now, but we should really
+// revisit this when batch is everywhere.
+
+class GrBatch_Globals {
+public:
+ static GrMemoryPool* GetTLS() {
+ return (GrMemoryPool*)SkTLS::Get(CreateTLS, DeleteTLS);
+ }
+
+private:
+ static void* CreateTLS() {
+ return SkNEW_ARGS(GrMemoryPool, (16384, 16384));
+ }
+
+ static void DeleteTLS(void* pool) {
+ SkDELETE(reinterpret_cast<GrMemoryPool*>(pool));
+ }
+};
+
+int32_t GrBatch::gCurrBatchClassID =
+ GrBatch::kIllegalBatchClassID;
+
+void* GrBatch::operator new(size_t size) {
+ return GrBatch_Globals::GetTLS()->allocate(size);
+}
+
+void GrBatch::operator delete(void* target) {
+ GrBatch_Globals::GetTLS()->release(target);
+}
diff --git a/src/gpu/GrBatch.h b/src/gpu/GrBatch.h
new file mode 100644
index 0000000..ceb2c5c
--- /dev/null
+++ b/src/gpu/GrBatch.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatch_DEFINED
+#define GrBatch_DEFINED
+
+#include <new>
+// TODO remove this header when we move entirely to batch
+#include "GrGeometryProcessor.h"
+#include "SkRefCnt.h"
+#include "SkThread.h"
+#include "SkTypes.h"
+
+class GrBatchTarget;
+class GrGpu;
+class GrIndexBufferAllocPool;
+class GrPipeline;
+class GrVertexBufferAllocPool;
+
+struct GrInitInvariantOutput;
+
+/*
+ * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
+ * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
+ * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
+ * subclasses complete freedom to decide how / what they can batch.
+ *
+ * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
+ * merged using combineIfPossible. When two batches merge, one takes on the union of the data
+ * and the other is left empty. The merged batch becomes responsible for drawing the data from both
+ * the original batches.
+ *
+ * If any optimizations require knowing more about the full state of the draw, e.g. whether or not
+ * the GrBatch is allowed to tweak alpha for coverage, then that information is communicated to the
+ * GrBatch prior to geometry generation.
+ */
+
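+/*
+ * A hypothetical usage sketch, mirroring GrAARectRenderer::fillAARect in this change. "MyBatch" is
+ * a made-up subclass name used purely for illustration:
+ *
+ *   MyBatch::Geometry geometry;
+ *   geometry.fColor = color;           // fill in whatever per-draw data the subclass captures
+ *   SkAutoTUnref<GrBatch> batch(MyBatch::Create(geometry));
+ *   target->drawBatch(pipelineBuilder, batch, &devBounds);
+ *
+ * The draw target only records the batch; geometry generation is deferred until flush, by which
+ * point compatible adjacent batches may already have been merged via combineIfPossible().
+ */
+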
+struct GrBatchOpt {
+ bool fCanTweakAlphaForCoverage;
+};
+
+class GrBatch : public SkRefCnt {
+public:
+ SK_DECLARE_INST_COUNT(GrBatch)
+ GrBatch() { SkDEBUGCODE(fUsed = false;) }
+ virtual ~GrBatch() {}
+
+ virtual const char* name() const = 0;
+ virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
+ virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
+
+ /*
+ * initBatchOpt is used to communicate possible optimizations to the GrBatch. initBatchTracker
+     * is a hook for some additional overrides from the GrXferProcessor. This is a bit
+ * confusing but has to be like this until GrBatch is everywhere.
+ *
+ * TODO combine to a single init call when GrBatch is everywhere.
+ */
+ virtual void initBatchOpt(const GrBatchOpt&) = 0;
+ virtual void initBatchTracker(const GrPipelineInfo& init) = 0;
+
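+    /*
+     * Sketch of the call order as wired up in this change (GrPipeline's batch constructor calls
+     * initBatchOpt, GrInOrderDrawBuffer::recordStateAndShouldDraw calls initBatchTracker, and the
+     * recorded DrawBatch command calls generateGeometry at flush time); the variable names below
+     * are illustrative:
+     *
+     *   batch->initBatchOpt(batchOpt);
+     *   batch->initBatchTracker(pipeline.getInitBatchTracker());
+     *   batch->generateGeometry(batchTarget, &pipeline);
+     */
+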
+ bool combineIfPossible(GrBatch* that) {
+ if (this->classID() != that->classID()) {
+ return false;
+ }
+
+ return onCombineIfPossible(that);
+ }
+
+ virtual bool onCombineIfPossible(GrBatch*) = 0;
+
+ virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0;
+
+ void* operator new(size_t size);
+ void operator delete(void* target);
+
+ void* operator new(size_t size, void* placement) {
+ return ::operator new(size, placement);
+ }
+ void operator delete(void* target, void* placement) {
+ ::operator delete(target, placement);
+ }
+
+ /**
+ * Helper for down-casting to a GrBatch subclass
+ */
+ template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
+ template <typename T> T* cast() { return static_cast<T*>(this); }
+
+ uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; }
+
+ // TODO no GrPrimitiveProcessors yet read fragment position
+ bool willReadFragmentPosition() const { return false; }
+
+ SkDEBUGCODE(bool isUsed() const { return fUsed; })
+
+protected:
+ template <typename PROC_SUBCLASS> void initClassID() {
+ static uint32_t kClassID = GenClassID();
+ fClassID = kClassID;
+ }
+
+ uint32_t fClassID;
+
+private:
+ static uint32_t GenClassID() {
+        // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The
+ // atomic inc returns the old value not the incremented value. So we add
+ // 1 to the returned value.
+ uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
+ if (!id) {
+ SkFAIL("This should never wrap as it should only be called once for each GrBatch "
+ "subclass.");
+ }
+ return id;
+ }
+
+ enum {
+ kIllegalBatchClassID = 0,
+ };
+ static int32_t gCurrBatchClassID;
+
+ SkDEBUGCODE(bool fUsed;)
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrBatchTarget.cpp b/src/gpu/GrBatchTarget.cpp
new file mode 100644
index 0000000..1ae8b63
--- /dev/null
+++ b/src/gpu/GrBatchTarget.cpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrBatchTarget.h"
+
+#include "GrBufferAllocPool.h"
+#include "GrPipeline.h"
+
+void GrBatchTarget::flush() {
+ FlushBuffer::Iter iter(fFlushBuffer);
+ fVertexPool->unmap();
+ fIndexPool->unmap();
+
+ while (iter.next()) {
+ GrProgramDesc desc;
+ BufferedFlush* bf = iter.get();
+ const GrPipeline* pipeline = bf->fPipeline;
+ const GrPrimitiveProcessor* primProc = bf->fPrimitiveProcessor.get();
+ fGpu->buildProgramDesc(&desc, *primProc, *pipeline, pipeline->descInfo(),
+ bf->fBatchTracker);
+
+ GrGpu::DrawArgs args(primProc, pipeline, &desc, &bf->fBatchTracker);
+ for (int i = 0; i < bf->fDraws.count(); i++) {
+ fGpu->draw(args, bf->fDraws[i]);
+ }
+ }
+ fFlushBuffer.reset();
+}
diff --git a/src/gpu/GrBatchTarget.h b/src/gpu/GrBatchTarget.h
new file mode 100644
index 0000000..82dad45
--- /dev/null
+++ b/src/gpu/GrBatchTarget.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatchBuffer_DEFINED
+#define GrBatchBuffer_DEFINED
+
+#include "GrPendingProgramElement.h"
+#include "GrGpu.h"
+#include "GrTRecorder.h"
+
+/*
+ * GrBatch instances use this object to allocate space for their geometry and to issue the draws
+ * that render their batch.
+ */
+
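+/*
+ * Rough sketch of how a GrBatch drives this object from generateGeometry(), following
+ * AAFillRectBatch in GrAARectRenderer.cpp (names like gp, vertexStride, and vertexCount stand in
+ * for whatever the subclass computes):
+ *
+ *   batchTarget->initDraw(gp, pipeline);
+ *   const GrVertexBuffer* vertexBuffer;
+ *   int firstVertex;
+ *   void* verts = batchTarget->vertexPool()->makeSpace(vertexStride, vertexCount,
+ *                                                      &vertexBuffer, &firstVertex);
+ *   // ... write vertices into verts, then describe and issue the draw ...
+ *   GrDrawTarget::DrawInfo drawInfo;
+ *   drawInfo.setVertexBuffer(vertexBuffer);
+ *   drawInfo.adjustStartVertex(firstVertex);
+ *   batchTarget->draw(drawInfo);
+ */
+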
+class GrBatchTarget : public SkNoncopyable {
+public:
+ GrBatchTarget(GrGpu* gpu,
+ GrVertexBufferAllocPool* vpool,
+ GrIndexBufferAllocPool* ipool)
+ : fGpu(gpu)
+ , fVertexPool(vpool)
+ , fIndexPool(ipool)
+ , fFlushBuffer(kFlushBufferInitialSizeInBytes) {}
+
+ typedef GrDrawTarget::DrawInfo DrawInfo;
+ void initDraw(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline) {
+ GrNEW_APPEND_TO_RECORDER(fFlushBuffer, BufferedFlush, (primProc, pipeline));
+ }
+
+ void draw(const GrDrawTarget::DrawInfo& draw) {
+ fFlushBuffer.back().fDraws.push_back(draw);
+ }
+ void flush();
+
+ // TODO This goes away when everything uses batch
+ GrBatchTracker* currentBatchTracker() {
+ SkASSERT(!fFlushBuffer.empty());
+ return &fFlushBuffer.back().fBatchTracker;
+ }
+
+ GrVertexBufferAllocPool* vertexPool() { return fVertexPool; }
+ GrIndexBufferAllocPool* indexPool() { return fIndexPool; }
+
+private:
+ GrGpu* fGpu;
+ GrVertexBufferAllocPool* fVertexPool;
+ GrIndexBufferAllocPool* fIndexPool;
+
+ typedef void* TBufferAlign; // This wouldn't be enough align if a command used long double.
+
+ struct BufferedFlush {
+ BufferedFlush(const GrPrimitiveProcessor* primProc, const GrPipeline* pipeline)
+ : fPrimitiveProcessor(primProc)
+ , fPipeline(pipeline)
+ , fDraws(kDrawRecorderInitialSizeInBytes) {}
+ typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
+ ProgramPrimitiveProcessor fPrimitiveProcessor;
+ const GrPipeline* fPipeline;
+ GrBatchTracker fBatchTracker;
+ SkSTArray<4, DrawInfo, true> fDraws;
+ };
+
+ enum {
+ kFlushBufferInitialSizeInBytes = 8 * sizeof(BufferedFlush),
+ kDrawRecorderInitialSizeInBytes = 8 * sizeof(DrawInfo),
+ };
+
+ typedef GrTRecorder<BufferedFlush, TBufferAlign> FlushBuffer;
+
+ FlushBuffer fFlushBuffer;
+};
+
+#endif
diff --git a/src/gpu/GrDefaultGeoProcFactory.cpp b/src/gpu/GrDefaultGeoProcFactory.cpp
index 20ec665..c0e8ea7 100644
--- a/src/gpu/GrDefaultGeoProcFactory.cpp
+++ b/src/gpu/GrDefaultGeoProcFactory.cpp
@@ -42,7 +42,7 @@
const Attribute* inCoverage() const { return fInCoverage; }
uint8_t coverage() const { return fCoverage; }
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
diff --git a/src/gpu/GrDrawTarget.cpp b/src/gpu/GrDrawTarget.cpp
index e8a758d..b89d70e 100644
--- a/src/gpu/GrDrawTarget.cpp
+++ b/src/gpu/GrDrawTarget.cpp
@@ -6,9 +6,9 @@
* found in the LICENSE file.
*/
-
-
#include "GrDrawTarget.h"
+
+#include "GrBatch.h"
#include "GrContext.h"
#include "GrDrawTargetCaps.h"
#include "GrPath.h"
@@ -526,6 +526,29 @@
}
}
+
+void GrDrawTarget::drawBatch(GrPipelineBuilder* pipelineBuilder,
+ GrBatch* batch,
+ const SkRect* devBounds) {
+ SkASSERT(pipelineBuilder);
+ // TODO some kind of checkdraw, but not at this level
+
+ // Setup clip
+ GrScissorState scissorState;
+ GrPipelineBuilder::AutoRestoreEffects are;
+ GrPipelineBuilder::AutoRestoreStencil ars;
+ if (!this->setupClip(pipelineBuilder, &are, &ars, &scissorState, devBounds)) {
+ return;
+ }
+
+ GrDeviceCoordTexture dstCopy;
+ if (!this->setupDstReadIfNecessary(pipelineBuilder, &dstCopy, devBounds)) {
+ return;
+ }
+
+ this->onDrawBatch(batch, *pipelineBuilder, scissorState, dstCopy.texture() ? &dstCopy : NULL);
+}
+
static const GrStencilSettings& winding_path_stencil_settings() {
GR_STATIC_CONST_SAME_STENCIL_STRUCT(gSettings,
kIncClamp_StencilOp,
diff --git a/src/gpu/GrDrawTarget.h b/src/gpu/GrDrawTarget.h
index b66f1c6..18265c8 100644
--- a/src/gpu/GrDrawTarget.h
+++ b/src/gpu/GrDrawTarget.h
@@ -26,6 +26,7 @@
#include "SkTypes.h"
#include "SkXfermode.h"
+class GrBatch;
class GrClipData;
class GrDrawTargetCaps;
class GrPath;
@@ -259,6 +260,11 @@
int vertexCount,
const SkRect* devBounds = NULL);
+ // TODO devbounds should live on the batch
+ void drawBatch(GrPipelineBuilder*,
+ GrBatch*,
+ const SkRect* devBounds = NULL);
+
/**
* Draws path into the stencil buffer. The fill must be either even/odd or
* winding (not inverse or hairline). It will respect the HW antialias flag
@@ -310,14 +316,14 @@
* that rectangle before it is input to GrCoordTransforms that read local
* coordinates
*/
- void drawRect(GrPipelineBuilder* ds,
+ void drawRect(GrPipelineBuilder* pipelineBuilder,
GrColor color,
const SkMatrix& viewMatrix,
const SkRect& rect,
const SkRect* localRect,
const SkMatrix* localMatrix) {
AutoGeometryPush agp(this);
- this->onDrawRect(ds, color, viewMatrix, rect, localRect, localMatrix);
+ this->onDrawRect(pipelineBuilder, color, viewMatrix, rect, localRect, localMatrix);
}
/**
@@ -527,6 +533,7 @@
*/
class DrawInfo {
public:
+ DrawInfo() { fDevBounds = NULL; }
DrawInfo(const DrawInfo& di) { (*this) = di; }
DrawInfo& operator =(const DrawInfo& di);
@@ -539,6 +546,15 @@
int indicesPerInstance() const { return fIndicesPerInstance; }
int instanceCount() const { return fInstanceCount; }
+ void setPrimitiveType(GrPrimitiveType type) { fPrimitiveType = type; }
+ void setStartVertex(int startVertex) { fStartVertex = startVertex; }
+ void setStartIndex(int startIndex) { fStartIndex = startIndex; }
+ void setVertexCount(int vertexCount) { fVertexCount = vertexCount; }
+ void setIndexCount(int indexCount) { fIndexCount = indexCount; }
+ void setVerticesPerInstance(int verticesPerI) { fVerticesPerInstance = verticesPerI; }
+ void setIndicesPerInstance(int indicesPerI) { fIndicesPerInstance = indicesPerI; }
+ void setInstanceCount(int instanceCount) { fInstanceCount = instanceCount; }
+
bool isIndexed() const { return fIndexCount > 0; }
#ifdef SK_DEBUG
bool isInstanced() const; // this version is longer because of asserts
@@ -568,8 +584,6 @@
const SkRect* getDevBounds() const { return fDevBounds; }
private:
- DrawInfo() { fDevBounds = NULL; }
-
friend class GrDrawTarget;
GrPrimitiveType fPrimitiveType;
@@ -708,6 +722,10 @@
const DrawInfo&,
const GrScissorState&,
const GrDeviceCoordTexture* dstCopy) = 0;
+ virtual void onDrawBatch(GrBatch*,
+ const GrPipelineBuilder&,
+ const GrScissorState&,
+ const GrDeviceCoordTexture* dstCopy) = 0;
// TODO copy in order drawbuffer onDrawRect to here
virtual void onDrawRect(GrPipelineBuilder*,
GrColor color,
diff --git a/src/gpu/GrFlushToGpuDrawTarget.h b/src/gpu/GrFlushToGpuDrawTarget.h
index 2383c62..fa06ed9 100644
--- a/src/gpu/GrFlushToGpuDrawTarget.h
+++ b/src/gpu/GrFlushToGpuDrawTarget.h
@@ -46,6 +46,9 @@
GrGpu* getGpu() { return fGpu; }
const GrGpu* getGpu() const{ return fGpu; }
+ GrVertexBufferAllocPool* getVertexAllocPool() { return fVertexPool; }
+ GrIndexBufferAllocPool* getIndexAllocPool() { return fIndexPool; }
+
private:
enum {
kGeoPoolStatePreAllocCnt = 4,
diff --git a/src/gpu/GrGeometryData.h b/src/gpu/GrGeometryData.h
deleted file mode 100644
index a4e9fe6..0000000
--- a/src/gpu/GrGeometryData.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright 2014 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrGeometryData_DEFINED
-#define GrGeometryData_DEFINED
-
-#include <new>
-#include "SkTypes.h"
-
-/*
- * A super lightweight base class for GeometryProcessor's to use to store draw data in a reorderable
- * fashion. Its most important feature is a pool allocator. Its virtual, but only so subclasses
- * will have their destructors called.
- */
-
-class GrGeometryData : SkNoncopyable {
-public:
- virtual ~GrGeometryData() {}
-
- /**
- * Helper for down-casting to a GrGeometryData subclass
- */
- template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
-
- void* operator new(size_t size);
-
- void operator delete(void* target);
-
- void* operator new(size_t size, void* placement) {
- return ::operator new(size, placement);
- }
-
- void operator delete(void* target, void* placement) {
- ::operator delete(target, placement);
- }
-};
-
-#endif
diff --git a/src/gpu/GrGeometryProcessor.cpp b/src/gpu/GrGeometryProcessor.cpp
index 75e6ed8..c707efc 100644
--- a/src/gpu/GrGeometryProcessor.cpp
+++ b/src/gpu/GrGeometryProcessor.cpp
@@ -516,7 +516,7 @@
out->setKnownSingleComponent(0xff);
}
-void GrPathProcessor::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrPathProcessor::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
PathBatchTracker* local = bt->cast<PathBatchTracker>();
if (init.fColorIgnored) {
local->fInputColorType = kIgnored_GrGPInput;
diff --git a/src/gpu/GrGeometryProcessor.h b/src/gpu/GrGeometryProcessor.h
index c55b9af..a7fea76 100644
--- a/src/gpu/GrGeometryProcessor.h
+++ b/src/gpu/GrGeometryProcessor.h
@@ -9,7 +9,6 @@
#define GrGeometryProcessor_DEFINED
#include "GrColor.h"
-#include "GrGeometryData.h"
#include "GrProcessor.h"
#include "GrShaderVar.h"
@@ -46,6 +45,8 @@
/*
* A struct for tracking batching decisions. While this lives on GrOptState, it is managed
* entirely by the derived classes of the GP.
+ * TODO: this was an early attempt at handling out-of-order batching. It should be
+ * used carefully as it is being replaced by GrBatch.
*/
class GrBatchTracker {
public:
@@ -65,12 +66,24 @@
SkAlignedSStorage<kMaxSize> fData;
};
+class GrIndexBufferAllocPool;
class GrGLCaps;
class GrGLPrimitiveProcessor;
-class GrOptDrawState;
+class GrVertexBufferAllocPool;
struct GrInitInvariantOutput;
+/*
+ * This struct allows the GrPipeline to communicate information about the pipeline. Most of this
+ * is overrides, but some of it is general information. Logically it should live in GrPipeline.h,
+ * but this is problematic due to circular dependencies.
+ */
+struct GrPipelineInfo {
+ bool fColorIgnored;
+ bool fCoverageIgnored;
+ GrColor fOverrideColor;
+ bool fUsesLocalCoords;
+};
/*
* This enum is shared by GrPrimitiveProcessors and GrGLPrimitiveProcessors to coordinate shaders
@@ -95,17 +108,7 @@
const SkMatrix& viewMatrix() const { return fViewMatrix; }
const SkMatrix& localMatrix() const { return fLocalMatrix; }
- /*
- * This struct allows the optstate to communicate requirements to the GrPrimitiveProcessor.
- */
- struct InitBT {
- bool fColorIgnored;
- bool fCoverageIgnored;
- GrColor fOverrideColor;
- bool fUsesLocalCoords;
- };
-
- virtual void initBatchTracker(GrBatchTracker*, const InitBT&) const = 0;
+ virtual void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const = 0;
virtual bool canMakeEqual(const GrBatchTracker& mine,
const GrPrimitiveProcessor& that,
@@ -304,7 +307,8 @@
* TODO this function changes quite a bit with deferred geometry. There the GrGeometryProcessor
* can upload a new color via attribute if needed.
*/
- static GrGPInput GetColorInputType(GrColor* color, GrColor primitiveColor, const InitBT& init,
+ static GrGPInput GetColorInputType(GrColor* color, GrColor primitiveColor,
+ const GrPipelineInfo& init,
bool hasVertexColor) {
if (init.fColorIgnored) {
*color = GrColor_ILLEGAL;
@@ -378,7 +382,7 @@
return SkNEW_ARGS(GrPathProcessor, (color, viewMatrix, localMatrix));
}
- void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
bool canMakeEqual(const GrBatchTracker& mine,
const GrPrimitiveProcessor& that,
diff --git a/src/gpu/GrInOrderDrawBuffer.cpp b/src/gpu/GrInOrderDrawBuffer.cpp
index 70d61ec..1453276 100644
--- a/src/gpu/GrInOrderDrawBuffer.cpp
+++ b/src/gpu/GrInOrderDrawBuffer.cpp
@@ -20,7 +20,8 @@
: INHERITED(gpu, vertexPool, indexPool)
, fCmdBuffer(kCmdBufferInitialSizeInBytes)
, fPrevState(NULL)
- , fDrawID(0) {
+ , fDrawID(0)
+ , fBatchTarget(gpu, vertexPool, indexPool) {
SkASSERT(vertexPool);
SkASSERT(indexPool);
@@ -210,6 +211,7 @@
Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());
if (!draw->fInfo.isInstanced() ||
+ draw->fInfo.primitiveType() != info.primitiveType() ||
draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
@@ -266,6 +268,29 @@
this->recordTraceMarkersIfNecessary();
}
+void GrInOrderDrawBuffer::onDrawBatch(GrBatch* batch,
+ const GrPipelineBuilder& pipelineBuilder,
+ const GrScissorState& scissorState,
+ const GrDeviceCoordTexture* dstCopy) {
+ if (!this->recordStateAndShouldDraw(batch, pipelineBuilder, scissorState, dstCopy)) {
+ return;
+ }
+
+ // Check if there is a Batch Draw we can batch with
+ if (kDrawBatch_Cmd != strip_trace_bit(fCmdBuffer.back().fType)) {
+ GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
+ return;
+ }
+
+ DrawBatch* draw = static_cast<DrawBatch*>(&fCmdBuffer.back());
+ if (draw->fBatch->combineIfPossible(batch)) {
+ return;
+ } else {
+ GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch));
+ }
+ this->recordTraceMarkersIfNecessary();
+}
+
void GrInOrderDrawBuffer::onStencilPath(const GrPipelineBuilder& pipelineBuilder,
const GrPathProcessor* pathProc,
const GrPath* path,
@@ -410,7 +435,6 @@
return;
}
-
CmdBuffer::Iter iter(fCmdBuffer);
int currCmdMarker = 0;
@@ -419,6 +443,10 @@
// stream.
SetState* currentState = NULL;
+    // TODO to prevent flushing the batch target too often, we only flush when a run of batch
+    // draws is followed by a non-batch command. In the long term we can delete this and just
+    // flush once at the end of all geometry generation.
+ bool wasBatch = false;
+
while (iter.next()) {
GrGpuTraceMarker newMarker("", -1);
SkString traceString;
@@ -429,13 +457,30 @@
++currCmdMarker;
}
- if (kSetState_Cmd == strip_trace_bit(iter->fType)) {
+ bool isSetState = kSetState_Cmd == strip_trace_bit(iter->fType);
+
+ if (!isSetState && kDrawBatch_Cmd != strip_trace_bit(iter->fType)) {
+ // TODO see note above, this gets deleted once everyone uses batch drawing
+ if (wasBatch) {
+ wasBatch = false;
+ fBatchTarget.flush();
+ }
+ }
+
+ if (isSetState) {
SetState* ss = reinterpret_cast<SetState*>(iter.get());
- this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor, ss->fPipeline,
- ss->fPipeline.descInfo(), ss->fBatchTracker);
+            // TODO sometimes we have a prim proc, other times we have a GrBatch. Eventually we
+            // will only have GrBatch and we can delete this.
+ if (ss->fPrimitiveProcessor) {
+ this->getGpu()->buildProgramDesc(&ss->fDesc, *ss->fPrimitiveProcessor,
+ ss->fPipeline,
+ ss->fPipeline.descInfo(),
+ ss->fBatchTracker);
+ } else {
+ wasBatch = true;
+ }
currentState = ss;
-
} else {
iter->execute(this, currentState);
}
@@ -445,6 +490,11 @@
}
}
+ // TODO see note above, one last catch
+ if (wasBatch) {
+ fBatchTarget.flush();
+ }
+
SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);
++fDrawID;
}
@@ -484,6 +534,11 @@
fCount, fStencilSettings);
}
+void GrInOrderDrawBuffer::DrawBatch::execute(GrInOrderDrawBuffer* buf, const SetState* state) {
+ SkASSERT(state);
+ fBatch->generateGeometry(buf->getBatchTarget(), &state->fPipeline);
+}
+
void GrInOrderDrawBuffer::SetState::execute(GrInOrderDrawBuffer*, const SetState*) {}
void GrInOrderDrawBuffer::Clear::execute(GrInOrderDrawBuffer* buf, const SetState*) {
@@ -531,7 +586,7 @@
ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
ss->fPipeline.getInitBatchTracker());
- if (fPrevState &&
+ if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
*ss->fPrimitiveProcessor,
ss->fBatchTracker) &&
@@ -544,6 +599,33 @@
return true;
}
+bool GrInOrderDrawBuffer::recordStateAndShouldDraw(GrBatch* batch,
+ const GrPipelineBuilder& pipelineBuilder,
+ const GrScissorState& scissor,
+ const GrDeviceCoordTexture* dstCopy) {
+    // TODO this gets much simpler when we have batches everywhere.
+    // If the previously recorded state is also for a batch (it has no primitive processor) and
+    // its pipeline is equal to the new one, we drop the new SetState and reuse the previous one.
+ SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState,
+ (batch, pipelineBuilder, *this->getGpu()->caps(), scissor,
+ dstCopy));
+ if (ss->fPipeline.mustSkip()) {
+ fCmdBuffer.pop_back();
+ return false;
+ }
+
+ batch->initBatchTracker(ss->fPipeline.getInitBatchTracker());
+
+ if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
+ fPrevState->fPipeline.isEqual(ss->fPipeline)) {
+ fCmdBuffer.pop_back();
+ } else {
+ fPrevState = ss;
+ this->recordTraceMarkersIfNecessary();
+ }
+ return true;
+}
+
void GrInOrderDrawBuffer::recordTraceMarkersIfNecessary() {
SkASSERT(!fCmdBuffer.empty());
SkASSERT(!cmd_has_trace_marker(fCmdBuffer.back().fType));
diff --git a/src/gpu/GrInOrderDrawBuffer.h b/src/gpu/GrInOrderDrawBuffer.h
index afa7d27..a421b3c 100644
--- a/src/gpu/GrInOrderDrawBuffer.h
+++ b/src/gpu/GrInOrderDrawBuffer.h
@@ -9,6 +9,9 @@
#define GrInOrderDrawBuffer_DEFINED
#include "GrFlushToGpuDrawTarget.h"
+
+#include "GrBatch.h"
+#include "GrBatchTarget.h"
#include "GrPipeline.h"
#include "GrPath.h"
#include "GrTRecorder.h"
@@ -53,13 +56,14 @@
private:
typedef GrGpu::DrawArgs DrawArgs;
enum {
- kDraw_Cmd = 1,
- kStencilPath_Cmd = 2,
- kSetState_Cmd = 3,
- kClear_Cmd = 4,
- kCopySurface_Cmd = 5,
- kDrawPath_Cmd = 6,
- kDrawPaths_Cmd = 7,
+ kDraw_Cmd = 1,
+ kStencilPath_Cmd = 2,
+ kSetState_Cmd = 3,
+ kClear_Cmd = 4,
+ kCopySurface_Cmd = 5,
+ kDrawPath_Cmd = 6,
+ kDrawPaths_Cmd = 7,
+ kDrawBatch_Cmd = 8,
};
struct SetState;
@@ -180,6 +184,7 @@
// TODO: rename to SetPipeline once pp, batch tracker, and desc are removed
struct SetState : public Cmd {
+ // TODO get rid of the prim proc version of this when we use batch everywhere
SetState(const GrPipelineBuilder& pipelineBuilder, const GrPrimitiveProcessor* primProc,
const GrDrawTargetCaps& caps,
const GrScissorState& scissor, const GrDeviceCoordTexture* dstCopy)
@@ -187,6 +192,13 @@
, fPrimitiveProcessor(primProc)
, fPipeline(pipelineBuilder, primProc, caps, scissor, dstCopy) {}
+ SetState(GrBatch* batch,
+ const GrPipelineBuilder& pipelineBuilder,
+ const GrDrawTargetCaps& caps,
+ const GrScissorState& scissor, const GrDeviceCoordTexture* dstCopy)
+ : Cmd(kSetState_Cmd)
+ , fPipeline(batch, pipelineBuilder, caps, scissor, dstCopy) {}
+
void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE;
typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
@@ -196,6 +208,17 @@
GrBatchTracker fBatchTracker;
};
+ struct DrawBatch : public Cmd {
+ DrawBatch(GrBatch* batch) : Cmd(kDrawBatch_Cmd), fBatch(SkRef(batch)) {
+ SkASSERT(!batch->isUsed());
+ }
+
+ void execute(GrInOrderDrawBuffer*, const SetState*) SK_OVERRIDE;
+
+ // TODO it wouldn't be too hard to let batches allocate in the cmd buffer
+ SkAutoTUnref<GrBatch> fBatch;
+ };
+
typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
@@ -208,6 +231,10 @@
const DrawInfo&,
const GrScissorState&,
const GrDeviceCoordTexture* dstCopy) SK_OVERRIDE;
+ void onDrawBatch(GrBatch*,
+ const GrPipelineBuilder&,
+ const GrScissorState&,
+ const GrDeviceCoordTexture* dstCopy) SK_OVERRIDE;
void onDrawRect(GrPipelineBuilder*,
GrColor,
const SkMatrix& viewMatrix,
@@ -253,10 +280,16 @@
// Determines whether the current draw operation requires a new GrPipeline and if so
// records it. If the draw can be skipped false is returned and no new GrPipeline is
// recorded.
+ // TODO delete the primproc variant when we have batches everywhere
bool SK_WARN_UNUSED_RESULT recordStateAndShouldDraw(const GrPipelineBuilder&,
const GrPrimitiveProcessor*,
const GrScissorState&,
const GrDeviceCoordTexture*);
+ bool SK_WARN_UNUSED_RESULT recordStateAndShouldDraw(GrBatch*,
+ const GrPipelineBuilder&,
+ const GrScissorState&,
+ const GrDeviceCoordTexture*);
+
// We lazily record clip changes in order to skip clips that have no effect.
void recordClipIfNecessary();
// Records any trace markers for a command after adding it to the buffer.
@@ -264,6 +297,8 @@
bool isIssued(uint32_t drawID) SK_OVERRIDE { return drawID != fDrawID; }
+ GrBatchTarget* getBatchTarget() { return &fBatchTarget; }
+
// TODO: Use a single allocator for commands and records
enum {
kCmdBufferInitialSizeInBytes = 8 * 1024,
@@ -277,6 +312,7 @@
SkTDArray<char> fPathIndexBuffer;
SkTDArray<float> fPathTransformBuffer;
uint32_t fDrawID;
+ GrBatchTarget fBatchTarget;
typedef GrFlushToGpuDrawTarget INHERITED;
};
diff --git a/src/gpu/GrOvalRenderer.cpp b/src/gpu/GrOvalRenderer.cpp
index 4b24e45..b1ef8d3 100644
--- a/src/gpu/GrOvalRenderer.cpp
+++ b/src/gpu/GrOvalRenderer.cpp
@@ -166,7 +166,7 @@
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
@@ -365,7 +365,7 @@
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
@@ -584,7 +584,7 @@
return SkNEW_ARGS(GLProcessor, (*this, bt));
}
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE {
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE {
BatchTracker* local = bt->cast<BatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
diff --git a/src/gpu/GrPipeline.cpp b/src/gpu/GrPipeline.cpp
index c35808e..911169e 100644
--- a/src/gpu/GrPipeline.cpp
+++ b/src/gpu/GrPipeline.cpp
@@ -7,6 +7,7 @@
#include "GrPipeline.h"
+#include "GrBatch.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrPipelineBuilder.h"
@@ -14,13 +15,37 @@
#include "GrXferProcessor.h"
GrPipeline::GrPipeline(const GrPipelineBuilder& pipelineBuilder,
- const GrPrimitiveProcessor* primProc,
- const GrDrawTargetCaps& caps,
- const GrScissorState& scissorState,
- const GrDeviceCoordTexture* dstCopy) {
+ const GrPrimitiveProcessor* primProc,
+ const GrDrawTargetCaps& caps,
+ const GrScissorState& scissorState,
+ const GrDeviceCoordTexture* dstCopy) {
const GrProcOptInfo& colorPOI = pipelineBuilder.colorProcInfo(primProc);
const GrProcOptInfo& coveragePOI = pipelineBuilder.coverageProcInfo(primProc);
+ this->internalConstructor(pipelineBuilder, colorPOI, coveragePOI, caps, scissorState, dstCopy);
+}
+
+GrPipeline::GrPipeline(GrBatch* batch,
+ const GrPipelineBuilder& pipelineBuilder,
+ const GrDrawTargetCaps& caps,
+ const GrScissorState& scissorState,
+ const GrDeviceCoordTexture* dstCopy) {
+ GrBatchOpt batchOpt;
+ batchOpt.fCanTweakAlphaForCoverage = pipelineBuilder.canTweakAlphaForCoverage();
+ batch->initBatchOpt(batchOpt);
+
+ const GrProcOptInfo& colorPOI = pipelineBuilder.colorProcInfo(batch);
+ const GrProcOptInfo& coveragePOI = pipelineBuilder.coverageProcInfo(batch);
+
+ this->internalConstructor(pipelineBuilder, colorPOI, coveragePOI, caps, scissorState, dstCopy);
+}
+
+void GrPipeline::internalConstructor(const GrPipelineBuilder& pipelineBuilder,
+ const GrProcOptInfo& colorPOI,
+ const GrProcOptInfo& coveragePOI,
+ const GrDrawTargetCaps& caps,
+ const GrScissorState& scissorState,
+ const GrDeviceCoordTexture* dstCopy) {
// Create XferProcessor from DS's XPFactory
SkAutoTUnref<GrXferProcessor> xferProcessor(
pipelineBuilder.getXPFactory()->createXferProcessor(colorPOI, coveragePOI));
diff --git a/src/gpu/GrPipeline.h b/src/gpu/GrPipeline.h
index df654ba..24d0b62 100644
--- a/src/gpu/GrPipeline.h
+++ b/src/gpu/GrPipeline.h
@@ -17,8 +17,8 @@
#include "SkMatrix.h"
#include "SkRefCnt.h"
+class GrBatch;
class GrDeviceCoordTexture;
-class GrPathProcessor;
class GrPipelineBuilder;
/**
@@ -29,10 +29,14 @@
public:
SK_DECLARE_INST_COUNT(GrPipeline)
+ // TODO get rid of this version of the constructor when we use batch everywhere
GrPipeline(const GrPipelineBuilder& pipelineBuilder, const GrPrimitiveProcessor*,
const GrDrawTargetCaps&, const GrScissorState&,
const GrDeviceCoordTexture* dstCopy);
+ GrPipeline(GrBatch*, const GrPipelineBuilder&, const GrDrawTargetCaps&,
+ const GrScissorState&, const GrDeviceCoordTexture* dstCopy);
+
/*
* Returns true if it is possible to combine the two GrPipelines and it will update 'this'
* to subsume 'that''s draw.
@@ -132,9 +136,17 @@
const GrProgramDesc::DescInfo& descInfo() const { return fDescInfo; }
- const GrGeometryProcessor::InitBT& getInitBatchTracker() const { return fInitBT; }
+ const GrPipelineInfo& getInitBatchTracker() const { return fInitBT; }
private:
+ // TODO we can have one constructor once GrBatch is complete
+ void internalConstructor(const GrPipelineBuilder&,
+ const GrProcOptInfo& colorPOI,
+ const GrProcOptInfo& coveragePOI,
+ const GrDrawTargetCaps&,
+ const GrScissorState&,
+ const GrDeviceCoordTexture* dstCopy);
+
/**
* Alter the program desc and inputs (attribs and processors) based on the blend optimization.
*/
@@ -164,13 +176,13 @@
RenderTarget fRenderTarget;
GrScissorState fScissorState;
GrStencilSettings fStencilSettings;
- GrPipelineBuilder::DrawFace fDrawFace;
+ GrPipelineBuilder::DrawFace fDrawFace;
GrDeviceCoordTexture fDstCopy;
uint32_t fFlags;
ProgramXferProcessor fXferProcessor;
FragmentStageArray fFragmentStages;
GrProgramDesc::DescInfo fDescInfo;
- GrGeometryProcessor::InitBT fInitBT;
+ GrPipelineInfo fInitBT;
    // This value is the offset into fFragmentStages where the coverage stages begin.
int fNumColorStages;
diff --git a/src/gpu/GrPipelineBuilder.cpp b/src/gpu/GrPipelineBuilder.cpp
index f329c05..90aee74 100644
--- a/src/gpu/GrPipelineBuilder.cpp
+++ b/src/gpu/GrPipelineBuilder.cpp
@@ -20,9 +20,7 @@
, fColorProcInfoValid(false)
, fCoverageProcInfoValid(false)
, fColorCache(GrColor_ILLEGAL)
- , fCoverageCache(GrColor_ILLEGAL)
- , fColorPrimProc(NULL)
- , fCoveragePrimProc(NULL) {
+ , fCoverageCache(GrColor_ILLEGAL) {
SkDEBUGCODE(fBlockEffectRemovalCnt = 0;)
}
@@ -39,8 +37,6 @@
fCoverageProcInfoValid = that.fCoverageProcInfoValid;
fColorCache = that.fColorCache;
fCoverageCache = that.fCoverageCache;
- fColorPrimProc = that.fColorPrimProc;
- fCoveragePrimProc = that.fCoveragePrimProc;
if (fColorProcInfoValid) {
fColorProcInfo = that.fColorProcInfo;
}
@@ -84,9 +80,6 @@
fColorCache = GrColor_ILLEGAL;
fCoverageCache = GrColor_ILLEGAL;
-
- fColorPrimProc = NULL;
- fCoveragePrimProc = NULL;
}
////////////////////////////////////////////////////////////////////////////////
@@ -161,22 +154,29 @@
}
void GrPipelineBuilder::calcColorInvariantOutput(const GrPrimitiveProcessor* pp) const {
- if (!fColorProcInfoValid || fColorPrimProc != pp) {
- fColorProcInfo.calcColorWithPrimProc(pp, fColorStages.begin(), this->numColorStages());
- fColorProcInfoValid = true;
- fColorPrimProc = pp;
- }
+ fColorProcInfo.calcColorWithPrimProc(pp, fColorStages.begin(), this->numColorStages());
+ fColorProcInfoValid = false;
}
void GrPipelineBuilder::calcCoverageInvariantOutput(const GrPrimitiveProcessor* pp) const {
- if (!fCoverageProcInfoValid || fCoveragePrimProc != pp) {
- fCoverageProcInfo.calcCoverageWithPrimProc(pp, fCoverageStages.begin(),
- this->numCoverageStages());
- fCoverageProcInfoValid = true;
- fCoveragePrimProc = pp;
- }
+ fCoverageProcInfo.calcCoverageWithPrimProc(pp, fCoverageStages.begin(),
+ this->numCoverageStages());
+ fCoverageProcInfoValid = false;
}
+void GrPipelineBuilder::calcColorInvariantOutput(const GrBatch* batch) const {
+ fColorProcInfo.calcColorWithBatch(batch, fColorStages.begin(), this->numColorStages());
+ fColorProcInfoValid = false;
+}
+
+void GrPipelineBuilder::calcCoverageInvariantOutput(const GrBatch* batch) const {
+ fCoverageProcInfo.calcCoverageWithBatch(batch, fCoverageStages.begin(),
+ this->numCoverageStages());
+ fCoverageProcInfoValid = false;
+}
+
void GrPipelineBuilder::calcColorInvariantOutput(GrColor color) const {
if (!fColorProcInfoValid || color != fColorCache) {
GrColorComponentFlags flags = kRGBA_GrColorComponentFlags;
diff --git a/src/gpu/GrPipelineBuilder.h b/src/gpu/GrPipelineBuilder.h
index 9579024..df9a1c8 100644
--- a/src/gpu/GrPipelineBuilder.h
+++ b/src/gpu/GrPipelineBuilder.h
@@ -8,7 +8,7 @@
#ifndef GrPipelineBuilder_DEFINED
#define GrPipelineBuilder_DEFINED
-
+#include "GrBatch.h"
#include "GrBlend.h"
#include "GrDrawTargetCaps.h"
#include "GrGeometryProcessor.h"
@@ -391,6 +391,15 @@
GrPipelineBuilder& operator= (const GrPipelineBuilder& that);
private:
+ // Calculating invariant color / coverage information is expensive, so we partially cache the
+ // results.
+ //
+    // canUseFracCoveragePrimProc() - Called during a regular Skia draw; caches results, but only
+    //                                for a specific color and coverage. May be called multiple times.
+    // willBlendWithDst()           - Only called by NVPR; does not cache results.
+    // GrOptDrawState constructor   - Never caches results.
+
+ // TODO delete when we have Batch
const GrProcOptInfo& colorProcInfo(const GrPrimitiveProcessor* pp) const {
this->calcColorInvariantOutput(pp);
return fColorProcInfo;
@@ -401,19 +410,30 @@
return fCoverageProcInfo;
}
- /**
- * If fColorProcInfoValid is false, function calculates the invariant output for the color
- * stages and results are stored in fColorProcInfo.
- */
- void calcColorInvariantOutput(const GrPrimitiveProcessor*) const;
+ const GrProcOptInfo& colorProcInfo(const GrBatch* batch) const {
+ this->calcColorInvariantOutput(batch);
+ return fColorProcInfo;
+ }
+
+ const GrProcOptInfo& coverageProcInfo(const GrBatch* batch) const {
+ this->calcCoverageInvariantOutput(batch);
+ return fCoverageProcInfo;
+ }
/**
- * If fCoverageProcInfoValid is false, function calculates the invariant output for the coverage
- * stages and results are stored in fCoverageProcInfo.
+     * GrPrimitiveProcessor variants of the calc functions.
+     * TODO: remove these once GrBatch is used everywhere.
*/
+ void calcColorInvariantOutput(const GrPrimitiveProcessor*) const;
void calcCoverageInvariantOutput(const GrPrimitiveProcessor*) const;
/**
+     * GrBatch provides the initial seed for these loops based on its geometry data.
+ */
+ void calcColorInvariantOutput(const GrBatch*) const;
+ void calcCoverageInvariantOutput(const GrBatch*) const;
+
+ /**
* If fColorProcInfoValid is false, function calculates the invariant output for the color
* stages and results are stored in fColorProcInfo.
*/
@@ -445,8 +465,6 @@
mutable bool fCoverageProcInfoValid;
mutable GrColor fColorCache;
mutable GrColor fCoverageCache;
- mutable const GrPrimitiveProcessor* fColorPrimProc;
- mutable const GrPrimitiveProcessor* fCoveragePrimProc;
friend class GrPipeline;
};
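
As the comment added to GrPipelineBuilder.h notes, only the plain-color path keeps its cache; the primitive-processor and batch overloads now recompute on every call (fColorProcInfoValid is deliberately left false, and the fColorPrimProc/fCoveragePrimProc cache keys are deleted). A hedged sketch of that mixed caching strategy, again with made-up placeholder types rather than the real Skia ones:

// Mixed caching strategy after this CL: the color overload still caches,
// while the batch overload recomputes every time. Placeholder types only.
#include <cstdint>

using Color = uint32_t;
struct Batch    { Color fSeedColor = 0; };
struct ProcInfo { Color fOutput = 0; };

class Builder {
public:
    // Cached: recomputes only when the input color changes.
    const ProcInfo& colorProcInfo(Color color) const {
        if (!fColorProcInfoValid || color != fColorCache) {
            fColorProcInfo.fOutput = color;   // stand-in for the real invariant calc
            fColorCache = color;
            fColorProcInfoValid = true;
        }
        return fColorProcInfo;
    }

    // Not cached: every batch may seed the calculation differently, so the
    // valid flag is intentionally left false (matching the CL).
    const ProcInfo& colorProcInfo(const Batch& batch) const {
        fColorProcInfo.fOutput = batch.fSeedColor;
        fColorProcInfoValid = false;
        return fColorProcInfo;
    }

private:
    mutable ProcInfo fColorProcInfo;
    mutable Color    fColorCache = 0;
    mutable bool     fColorProcInfoValid = false;
};
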
diff --git a/src/gpu/GrProcOptInfo.cpp b/src/gpu/GrProcOptInfo.cpp
index a350ef7..dc499fa 100644
--- a/src/gpu/GrProcOptInfo.cpp
+++ b/src/gpu/GrProcOptInfo.cpp
@@ -7,10 +7,29 @@
#include "GrProcOptInfo.h"
+#include "GrBatch.h"
#include "GrFragmentProcessor.h"
#include "GrFragmentStage.h"
#include "GrGeometryProcessor.h"
+void GrProcOptInfo::calcColorWithBatch(const GrBatch* batch,
+ const GrFragmentStage* stages,
+ int stageCount) {
+ GrInitInvariantOutput out;
+ batch->getInvariantOutputColor(&out);
+ fInOut.reset(out);
+ this->internalCalc(stages, stageCount, batch->willReadFragmentPosition());
+}
+
+void GrProcOptInfo::calcCoverageWithBatch(const GrBatch* batch,
+ const GrFragmentStage* stages,
+ int stageCount) {
+ GrInitInvariantOutput out;
+ batch->getInvariantOutputCoverage(&out);
+ fInOut.reset(out);
+ this->internalCalc(stages, stageCount, batch->willReadFragmentPosition());
+}
+
void GrProcOptInfo::calcColorWithPrimProc(const GrPrimitiveProcessor* primProc,
const GrFragmentStage* stages,
int stageCount) {
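
calcColorWithBatch() and calcCoverageWithBatch() differ from the primProc variants only in where the initial invariant output comes from: the batch seeds it from its geometry data, then the shared internalCalc() walks the fragment stages. A simplified sketch of that seed-then-refine flow, using invented placeholder types rather than the real GrBatch/GrProcOptInfo API:

// Seed-then-refine flow, reduced to a toy: the batch provides the starting
// invariant output, then each fragment stage refines it. Placeholder types.
#include <vector>

struct InvariantOutput {
    unsigned fColor    = 0;
    bool     fIsOpaque = false;
};

struct Batch {
    // Mirrors getInvariantOutputColor(GrInitInvariantOutput*) in the CL.
    void getInvariantOutputColor(InvariantOutput* out) const {
        out->fColor    = fGeometryColor;
        out->fIsOpaque = true;
    }
    unsigned fGeometryColor = 0xff00ff00;
};

struct FragmentStage {
    void apply(InvariantOutput* io) const { io->fColor &= fMask; }
    unsigned fMask = 0xffffffff;
};

class ProcOptInfo {
public:
    void calcColorWithBatch(const Batch& batch, const std::vector<FragmentStage>& stages) {
        InvariantOutput out;
        batch.getInvariantOutputColor(&out);   // batch seeds the loop
        fInOut = out;
        this->internalCalc(stages);            // fragment stages refine it
    }

private:
    void internalCalc(const std::vector<FragmentStage>& stages) {
        for (const FragmentStage& stage : stages) {
            stage.apply(&fInOut);
        }
    }

    InvariantOutput fInOut;
};
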
diff --git a/src/gpu/GrProcOptInfo.h b/src/gpu/GrProcOptInfo.h
index 75d88c6..6e8f615 100644
--- a/src/gpu/GrProcOptInfo.h
+++ b/src/gpu/GrProcOptInfo.h
@@ -11,6 +11,7 @@
#include "GrColor.h"
#include "GrInvariantOutput.h"
+class GrBatch;
class GrFragmentStage;
class GrFragmentProcessor;
class GrPrimitiveProcessor;
@@ -33,6 +34,10 @@
void calcWithInitialValues(const GrFragmentStage*, int stageCount, GrColor startColor,
GrColorComponentFlags flags, bool areCoverageStages);
+ void calcColorWithBatch(const GrBatch*, const GrFragmentStage*, int stagecount);
+ void calcCoverageWithBatch(const GrBatch*, const GrFragmentStage*, int stagecount);
+
+ // TODO delete these when batch is everywhere
void calcColorWithPrimProc(const GrPrimitiveProcessor*, const GrFragmentStage*, int stagecount);
void calcCoverageWithPrimProc(const GrPrimitiveProcessor*, const GrFragmentStage*,
int stagecount);
diff --git a/src/gpu/GrProcessor.cpp b/src/gpu/GrProcessor.cpp
index 9977018..08f437d 100644
--- a/src/gpu/GrProcessor.cpp
+++ b/src/gpu/GrProcessor.cpp
@@ -8,7 +8,6 @@
#include "GrProcessor.h"
#include "GrContext.h"
#include "GrCoordTransform.h"
-#include "GrGeometryData.h"
#include "GrGeometryProcessor.h"
#include "GrInvariantOutput.h"
#include "GrMemoryPool.h"
@@ -172,19 +171,6 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
-/*
- * GrGeometryData shares the same pool so it lives in this file too
- */
-void* GrGeometryData::operator new(size_t size) {
- return GrProcessor_Globals::GetTLS()->allocate(size);
-}
-
-void GrGeometryData::operator delete(void* target) {
- GrProcessor_Globals::GetTLS()->release(target);
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
// Initial static variable from GrXPFactory
int32_t GrXPFactory::gCurrXPFClassID =
GrXPFactory::kIllegalXPFClassID;
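
The block deleted from GrProcessor.cpp is the class-scoped operator new/delete pair that routed GrGeometryData allocations through the per-thread processor pool; GrGeometryData goes away as GrBatch takes over. For reference, a minimal self-contained version of that allocation pattern, with a trivial stand-in pool instead of the real GrMemoryPool-via-TLS machinery:

// Class-scoped operator new/delete backed by a custom pool, as the deleted
// GrGeometryData block did. Pool and GetPool() are illustrative stand-ins.
#include <cstddef>
#include <cstdlib>

class Pool {
public:
    void* allocate(std::size_t size) { return std::malloc(size); }
    void  release(void* ptr)         { std::free(ptr); }
};

static Pool* GetPool() {
    static Pool gPool;   // stand-in for GrProcessor_Globals::GetTLS()
    return &gPool;
}

class PooledData {
public:
    void* operator new(std::size_t size) { return GetPool()->allocate(size); }
    void  operator delete(void* target)  { GetPool()->release(target); }
};
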
diff --git a/src/gpu/effects/GrBezierEffect.cpp b/src/gpu/effects/GrBezierEffect.cpp
index 1c6d469..37e1dff 100644
--- a/src/gpu/effects/GrBezierEffect.cpp
+++ b/src/gpu/effects/GrBezierEffect.cpp
@@ -208,7 +208,7 @@
return (ce.fEdgeType == fEdgeType);
}
-void GrConicEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrConicEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
ConicBatchTracker* local = bt->cast<ConicBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fCoverageScale = fCoverageScale;
@@ -432,7 +432,7 @@
return (ce.fEdgeType == fEdgeType);
}
-void GrQuadEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrQuadEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
QuadBatchTracker* local = bt->cast<QuadBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fCoverageScale = fCoverageScale;
@@ -677,7 +677,7 @@
return (ce.fEdgeType == fEdgeType);
}
-void GrCubicEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrCubicEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
CubicBatchTracker* local = bt->cast<CubicBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
diff --git a/src/gpu/effects/GrBezierEffect.h b/src/gpu/effects/GrBezierEffect.h
index 020286f..b0039e9 100644
--- a/src/gpu/effects/GrBezierEffect.h
+++ b/src/gpu/effects/GrBezierEffect.h
@@ -105,7 +105,7 @@
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;
@@ -190,7 +190,7 @@
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;
@@ -271,7 +271,7 @@
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;
diff --git a/src/gpu/effects/GrBitmapTextGeoProc.cpp b/src/gpu/effects/GrBitmapTextGeoProc.cpp
index 924f7cd..3133265 100644
--- a/src/gpu/effects/GrBitmapTextGeoProc.cpp
+++ b/src/gpu/effects/GrBitmapTextGeoProc.cpp
@@ -145,7 +145,7 @@
return SkNEW_ARGS(GrGLBitmapTextGeoProc, (*this, bt));
}
-void GrBitmapTextGeoProc::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrBitmapTextGeoProc::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
BitmapTextBatchTracker* local = bt->cast<BitmapTextBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
diff --git a/src/gpu/effects/GrBitmapTextGeoProc.h b/src/gpu/effects/GrBitmapTextGeoProc.h
index d73fba8..d5684d7 100644
--- a/src/gpu/effects/GrBitmapTextGeoProc.h
+++ b/src/gpu/effects/GrBitmapTextGeoProc.h
@@ -43,7 +43,7 @@
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps& caps) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker*, const InitBT&) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker*, const GrPipelineInfo&) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
const GrBatchTracker&) const SK_OVERRIDE;
diff --git a/src/gpu/effects/GrDashingEffect.cpp b/src/gpu/effects/GrDashingEffect.cpp
index 4a03406..34e0301 100644
--- a/src/gpu/effects/GrDashingEffect.cpp
+++ b/src/gpu/effects/GrDashingEffect.cpp
@@ -501,7 +501,7 @@
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker&,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
@@ -706,7 +706,7 @@
fCenterX == dce.fCenterX);
}
-void DashingCircleEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void DashingCircleEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
DashingCircleBatchTracker* local = bt->cast<DashingCircleBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
@@ -795,7 +795,7 @@
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
@@ -1013,7 +1013,7 @@
fIntervalLength == de.fIntervalLength);
}
-void DashingLineEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void DashingLineEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
DashingLineBatchTracker* local = bt->cast<DashingLineBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
diff --git a/src/gpu/effects/GrDistanceFieldTextureEffect.cpp b/src/gpu/effects/GrDistanceFieldTextureEffect.cpp
index 28220a7..96dea3e 100755
--- a/src/gpu/effects/GrDistanceFieldTextureEffect.cpp
+++ b/src/gpu/effects/GrDistanceFieldTextureEffect.cpp
@@ -252,7 +252,7 @@
return SkNEW_ARGS(GrGLDistanceFieldTextureEffect, (*this, bt));
}
-void GrDistanceFieldTextureEffect::initBatchTracker(GrBatchTracker* bt, const InitBT& init) const {
+void GrDistanceFieldTextureEffect::initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const {
DistanceFieldBatchTracker* local = bt->cast<DistanceFieldBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
@@ -508,7 +508,7 @@
}
void GrDistanceFieldNoGammaTextureEffect::initBatchTracker(GrBatchTracker* bt,
- const InitBT& init) const {
+ const GrPipelineInfo& init) const {
DistanceFieldNoGammaBatchTracker* local = bt->cast<DistanceFieldNoGammaBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init,
SkToBool(fInColor));
@@ -824,7 +824,7 @@
}
void GrDistanceFieldLCDTextureEffect::initBatchTracker(GrBatchTracker* bt,
- const InitBT& init) const {
+ const GrPipelineInfo& init) const {
DistanceFieldLCDBatchTracker* local = bt->cast<DistanceFieldLCDBatchTracker>();
local->fInputColorType = GetColorInputType(&local->fColor, this->color(), init, false);
local->fUsesLocalCoords = init.fUsesLocalCoords;
diff --git a/src/gpu/effects/GrDistanceFieldTextureEffect.h b/src/gpu/effects/GrDistanceFieldTextureEffect.h
index 5a99d24..33209a5 100644
--- a/src/gpu/effects/GrDistanceFieldTextureEffect.h
+++ b/src/gpu/effects/GrDistanceFieldTextureEffect.h
@@ -83,7 +83,7 @@
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
@@ -148,7 +148,7 @@
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,
@@ -206,7 +206,7 @@
virtual GrGLPrimitiveProcessor* createGLInstance(const GrBatchTracker& bt,
const GrGLCaps&) const SK_OVERRIDE;
- void initBatchTracker(GrBatchTracker* bt, const InitBT& init) const SK_OVERRIDE;
+ void initBatchTracker(GrBatchTracker* bt, const GrPipelineInfo& init) const SK_OVERRIDE;
bool onCanMakeEqual(const GrBatchTracker&,
const GrGeometryProcessor&,