Lift the tessellation atlas into its own path renderer
Creates a new path renderer, GrAtlasPathRenderer, that handles all the
atlasing. Managing the atlas in its own path renderer gives us more
control over when atlasing happens in the chain, will allow us to more
easily use the atlas in kCoverage mode, and makes the clipping code
cleaner.
Bug: skia:12258
Change-Id: Ie0b669974936c23895c8ab794e2d97206ed140f8
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/431896
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Michael Ludwig <michaelludwig@google.com>
diff --git a/src/gpu/ops/GrAtlasInstancedHelper.cpp b/src/gpu/ops/GrAtlasInstancedHelper.cpp
new file mode 100644
index 0000000..082c240
--- /dev/null
+++ b/src/gpu/ops/GrAtlasInstancedHelper.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2020 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrAtlasInstancedHelper.h"
+
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+void GrAtlasInstancedHelper::appendInstanceAttribs(
+ SkTArray<GrGeometryProcessor::Attribute>* instanceAttribs) const {
+ instanceAttribs->emplace_back("locations", kFloat4_GrVertexAttribType, kFloat4_GrSLType);
+ if (fShaderFlags & ShaderFlags::kCheckBounds) {
+ instanceAttribs->emplace_back("sizeInAtlas", kFloat2_GrVertexAttribType, kFloat2_GrSLType);
+ }
+}
+
+void GrAtlasInstancedHelper::writeInstanceData(GrVertexWriter* instanceWriter,
+ const Instance* i) const {
+ SkASSERT(i->fLocationInAtlas.x() >= 0);
+ SkASSERT(i->fLocationInAtlas.y() >= 0);
+ instanceWriter->write(
+ // A negative x coordinate in the atlas indicates that the path is transposed.
+ // Also add 1 since we can't negate zero.
+ (float)(i->fTransposedInAtlas ? -i->fLocationInAtlas.x() - 1
+ : i->fLocationInAtlas.x() + 1),
+ (float)i->fLocationInAtlas.y(),
+ (float)i->fPathDevIBounds.left(),
+ (float)i->fPathDevIBounds.top(),
+ GrVertexWriter::If(fShaderFlags & ShaderFlags::kCheckBounds,
+ SkSize::Make(i->fPathDevIBounds.size())));
+}
+
+void GrAtlasInstancedHelper::injectShaderCode(
+ const GrGLSLGeometryProcessor::EmitArgs& args, const GrShaderVar& devCoord,
+ GrGLSLUniformHandler::UniformHandle* atlasAdjustUniformHandle) const {
+ GrGLSLVarying atlasCoord(kFloat2_GrSLType);
+ args.fVaryingHandler->addVarying("atlasCoord", &atlasCoord);
+
+ const char* atlasAdjustName;
+ *atlasAdjustUniformHandle = args.fUniformHandler->addUniform(
+ nullptr, kVertex_GrShaderFlag, kFloat2_GrSLType, "atlas_adjust", &atlasAdjustName);
+
+ args.fVertBuilder->codeAppendf(R"(
+ // A negative x coordinate in the atlas indicates that the path is transposed.
+ // We also added 1 since we can't negate zero.
+ float2 atlasTopLeft = float2(abs(locations.x) - 1, locations.y);
+ float2 devTopLeft = locations.zw;
+ bool transposed = locations.x < 0;
+ float2 atlasCoord = %s - devTopLeft;
+ if (transposed) {
+ atlasCoord = atlasCoord.yx;
+ }
+ atlasCoord += atlasTopLeft;
+ %s = atlasCoord * %s;)", devCoord.c_str(), atlasCoord.vsOut(), atlasAdjustName);
+
+ if (fShaderFlags & ShaderFlags::kCheckBounds) {
+ GrGLSLVarying atlasBounds(kFloat4_GrSLType);
+        args.fVaryingHandler->addVarying("atlasBounds", &atlasBounds,
+ GrGLSLVaryingHandler::Interpolation::kCanBeFlat);
+ args.fVertBuilder->codeAppendf(R"(
+ float4 atlasBounds = atlasTopLeft.xyxy + (transposed ? sizeInAtlas.00yx
+ : sizeInAtlas.00xy);
+ %s = atlasBounds * %s.xyxy;)", atlasBounds.vsOut(), atlasAdjustName);
+
+ args.fFragBuilder->codeAppendf(R"(
+ half atlasCoverage = 0;
+ float2 atlasCoord = %s;
+ float4 atlasBounds = %s;
+ if (all(greaterThan(atlasCoord, atlasBounds.xy)) &&
+ all(lessThan(atlasCoord, atlasBounds.zw))) {
+ atlasCoverage = )", atlasCoord.fsIn(), atlasBounds.fsIn());
+ args.fFragBuilder->appendTextureLookup(args.fTexSamplers[0], "atlasCoord");
+ args.fFragBuilder->codeAppendf(R"(.a;
+ })");
+ } else {
+ args.fFragBuilder->codeAppendf("half atlasCoverage = ");
+ args.fFragBuilder->appendTextureLookup(args.fTexSamplers[0], atlasCoord.fsIn());
+ args.fFragBuilder->codeAppendf(".a;");
+ }
+
+ if (fShaderFlags & ShaderFlags::kInvertCoverage) {
+ args.fFragBuilder->codeAppendf("%s *= (1 - atlasCoverage);", args.fOutputCoverage);
+ } else {
+ args.fFragBuilder->codeAppendf("%s *= atlasCoverage;", args.fOutputCoverage);
+ }
+}
+
+void GrAtlasInstancedHelper::setUniformData(
+ const GrGLSLProgramDataManager& pdman,
+ const GrGLSLUniformHandler::UniformHandle& atlasAdjustUniformHandle) const {
+ SkASSERT(fAtlasProxy->isInstantiated());
+ SkISize dimensions = fAtlasProxy->backingStoreDimensions();
+ pdman.set2f(atlasAdjustUniformHandle, 1.f / dimensions.width(), 1.f / dimensions.height());
+}
diff --git a/src/gpu/ops/GrAtlasInstancedHelper.h b/src/gpu/ops/GrAtlasInstancedHelper.h
new file mode 100644
index 0000000..28a7921
--- /dev/null
+++ b/src/gpu/ops/GrAtlasInstancedHelper.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2021 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAtlasInstancedHelper_DEFINED
+#define GrAtlasInstancedHelper_DEFINED
+
+#include "src/core/SkIPoint16.h"
+#include "src/gpu/GrGeometryProcessor.h"
+#include "src/gpu/GrSurfaceProxyView.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLUniformHandler.h"
+
+struct GrVertexWriter;
+
+// This class encapsulates all the necessary steps for an instanced GrGeometryProcessor to clip
+// against a path mask from an atlas.
+class GrAtlasInstancedHelper {
+public:
+ enum class ShaderFlags {
+ kNone = 0,
+ kInvertCoverage = 1 << 0,
+ kCheckBounds = 1 << 1
+ };
+
+ GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(ShaderFlags);
+
+ constexpr static int kNumShaderFlags = 2;
+
+ GrAtlasInstancedHelper(GrSurfaceProxyView atlasView, ShaderFlags shaderFlags)
+ : fAtlasProxy(atlasView.detachProxy())
+ , fAtlasSwizzle(atlasView.swizzle())
+ , fShaderFlags(shaderFlags) {
+ // Bottom left origin is not supported.
+ SkASSERT(atlasView.origin() == kTopLeft_GrSurfaceOrigin);
+ }
+
+ GrSurfaceProxy* proxy() const { return fAtlasProxy.get(); }
+ const GrSwizzle& atlasSwizzle() const { return fAtlasSwizzle; }
+
+ // Returns whether the two helpers can be batched together in a single draw.
+ bool isCompatible(const GrAtlasInstancedHelper& helper) {
+ // TODO: We may want to consider two helpers compatible if they only differ in the
+ // kCheckBounds flag -- we can always promote one to checking its bounds.
+ SkASSERT(fAtlasProxy != helper.fAtlasProxy || fAtlasSwizzle == helper.fAtlasSwizzle);
+ return fAtlasProxy == helper.fAtlasProxy && fShaderFlags == helper.fShaderFlags;
+ }
+
+ // Adds bits to the shader key that uniquely identify this specific helper's shader code.
+ void getKeyBits(GrProcessorKeyBuilder* b) const {
+ b->addBits(kNumShaderFlags, (int)fShaderFlags, "atlasFlags");
+ }
+
+ // Appends the instanced input attribs to the back of the array that we will need in order to
+ // locate our path in the atlas.
+ void appendInstanceAttribs(SkTArray<GrGeometryProcessor::Attribute>* instanceAttribs) const;
+
+ struct Instance {
+ Instance(SkIPoint16 locationInAtlas, const SkIRect& pathDevIBounds, bool transposedInAtlas)
+ : fLocationInAtlas(locationInAtlas)
+ , fPathDevIBounds(pathDevIBounds)
+ , fTransposedInAtlas(transposedInAtlas) {
+ SkASSERT(fLocationInAtlas.x() >= 0);
+ SkASSERT(fLocationInAtlas.y() >= 0);
+ }
+ SkIPoint16 fLocationInAtlas;
+ SkIRect fPathDevIBounds;
+ bool fTransposedInAtlas;
+ };
+
+ // Writes out the given instance data, formatted for the specific attribs that we added during
+ // appendInstanceAttribs().
+ void writeInstanceData(GrVertexWriter* instanceWriter, const Instance*) const;
+
+ // Injects vertex code, fragment code, varyings, and uniforms to ultimately multiply
+ // "args.fOutputCoverage" in the fragment shader by the atlas coverage.
+ //
+ // The caller is responsible to store "atlasAdjustUniformHandle" and pass it to
+ // setUniformData().
+ void injectShaderCode(const GrGLSLGeometryProcessor::EmitArgs&, const GrShaderVar& devCoord,
+ GrGLSLUniformHandler::UniformHandle* atlasAdjustUniformHandle) const;
+
+ // The atlas clip requires one uniform value -- "atlasAdjustUniform". The caller should have
+ // stored this handle after its call to injectShaderCode(). This method sets its value prior to
+ // drawing.
+ void setUniformData(const GrGLSLProgramDataManager&,
+ const GrGLSLUniformHandler::UniformHandle& atlasAdjustUniformHandle) const;
+
+private:
+ const sk_sp<GrSurfaceProxy> fAtlasProxy;
+ const GrSwizzle fAtlasSwizzle;
+ const ShaderFlags fShaderFlags;
+};
+
+GR_MAKE_BITFIELD_CLASS_OPS(GrAtlasInstancedHelper::ShaderFlags);
+
+#endif
diff --git a/src/gpu/ops/GrAtlasPathRenderer.cpp b/src/gpu/ops/GrAtlasPathRenderer.cpp
new file mode 100644
index 0000000..4620bf3
--- /dev/null
+++ b/src/gpu/ops/GrAtlasPathRenderer.cpp
@@ -0,0 +1,386 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrAtlasPathRenderer.h"
+
+#include "include/private/SkVx.h"
+#include "src/core/SkIPoint16.h"
+#include "src/gpu/GrClip.h"
+#include "src/gpu/GrDirectContextPriv.h"
+#include "src/gpu/GrSurfaceDrawContext.h"
+#include "src/gpu/GrVx.h"
+#include "src/gpu/effects/GrModulateAtlasCoverageEffect.h"
+#include "src/gpu/geometry/GrStyledShape.h"
+#include "src/gpu/ops/GrDrawAtlasPathOp.h"
+#include "src/gpu/tessellate/GrAtlasRenderTask.h"
+#include "src/gpu/tessellate/GrTessellationPathRenderer.h"
+#include "src/gpu/tessellate/shaders/GrTessellationShader.h"
+
+using grvx::float2;
+using grvx::int2;
+
+constexpr static auto kAtlasAlpha8Type = GrColorType::kAlpha_8;
+constexpr static int kAtlasInitialSize = 512;
+
+// The atlas is only used for small-area paths, which means at least one dimension of every path is
+// guaranteed to be quite small. So if we transpose tall paths, then every path will have a small
+// height, which lends very well to efficient pow2 atlas packing.
+constexpr static auto kAtlasAlgorithm = GrDynamicAtlas::RectanizerAlgorithm::kPow2;
+
+// Ensure every path in the atlas falls in or below the 128px high rectanizer band.
+constexpr static int kAtlasMaxPathHeight = 128;
+
+bool GrAtlasPathRenderer::IsSupported(GrRecordingContext* rContext) {
+ const GrCaps& caps = *rContext->priv().caps();
+ auto atlasFormat = caps.getDefaultBackendFormat(kAtlasAlpha8Type, GrRenderable::kYes);
+ return rContext->asDirectContext() && // The atlas doesn't support DDL yet.
+ caps.internalMultisampleCount(atlasFormat) > 1 &&
+ // GrAtlasRenderTask currently requires tessellation. In the future it could use the
+ // default path renderer when tessellation isn't available.
+ GrTessellationPathRenderer::IsSupported(caps);
+}
+
+sk_sp<GrAtlasPathRenderer> GrAtlasPathRenderer::Make(GrRecordingContext* rContext) {
+ return IsSupported(rContext)
+ ? sk_sp<GrAtlasPathRenderer>(new GrAtlasPathRenderer(rContext->asDirectContext()))
+ : nullptr;
+}
+
+GrAtlasPathRenderer::GrAtlasPathRenderer(GrDirectContext* dContext) {
+ SkASSERT(IsSupported(dContext));
+ const GrCaps& caps = *dContext->priv().caps();
+#if GR_TEST_UTILS
+ fAtlasMaxSize = dContext->priv().options().fMaxTextureAtlasSize;
+#else
+ fAtlasMaxSize = 2048;
+#endif
+ fAtlasMaxSize = SkPrevPow2(std::min(fAtlasMaxSize, (float)caps.maxPreferredRenderTargetSize()));
+ fAtlasInitialSize = SkNextPow2(std::min(kAtlasInitialSize, (int)fAtlasMaxSize));
+}
+
+// Returns the rect [topLeftFloor, botRightCeil], which is the rect [r] rounded out to integer
+// boundaries.
+static std::tuple<float2,float2> round_out(const SkRect& r) {
+ return {skvx::floor(float2::Load(&r.fLeft)), skvx::ceil(float2::Load(&r.fRight))};
+}
+
+bool GrAtlasPathRenderer::pathFitsInAtlas(const SkRect& pathDevBounds) const {
+ auto [topLeftFloor, botRightCeil] = round_out(pathDevBounds);
+ float2 size = botRightCeil - topLeftFloor;
+ return // Ensure the path's largest dimension fits in the atlas.
+ skvx::all(size <= fAtlasMaxSize) &&
+ // Since we will transpose tall skinny paths, limiting to kAtlasMaxPathHeight^2 pixels
+ // guarantees heightInAtlas <= kAtlasMaxPathHeight, while also allowing paths that are
+ // very wide and short.
+ size[0] * size[1] <= kAtlasMaxPathHeight * kAtlasMaxPathHeight;
+}
+
+void GrAtlasPathRenderer::AtlasPathKey::set(const SkMatrix& m, const SkPath& path) {
+ using grvx::float2;
+ fPathGenID = path.getGenerationID();
+ fAffineMatrix[0] = m.getScaleX();
+ fAffineMatrix[1] = m.getSkewX();
+ fAffineMatrix[2] = m.getSkewY();
+ fAffineMatrix[3] = m.getScaleY();
+ float2 translate = {m.getTranslateX(), m.getTranslateY()};
+ float2 subpixelPosition = translate - skvx::floor(translate);
+ float2 subpixelPositionKey = skvx::trunc(subpixelPosition *
+ GrTessellationShader::kLinearizationPrecision);
+ skvx::cast<uint8_t>(subpixelPositionKey).store(fSubpixelPositionKey);
+ fFillRule = (uint16_t)GrFillRuleForSkPath(path); // Fill rule doesn't affect the path's genID.
+}
+
+bool GrAtlasPathRenderer::addPathToAtlas(GrRecordingContext* rContext,
+ const SkMatrix& viewMatrix,
+ const SkPath& path,
+ const SkRect& pathDevBounds,
+ SkIRect* devIBounds,
+ SkIPoint16* locationInAtlas,
+ bool* transposedInAtlas,
+ const DrawRefsAtlasCallback& drawRefsAtlasCallback) {
+ SkASSERT(!viewMatrix.hasPerspective()); // See onCanDrawPath().
+
+ pathDevBounds.roundOut(devIBounds);
+#ifdef SK_DEBUG
+ // is_visible() should have guaranteed the path's bounds were representable as ints, since clip
+ // bounds within the max render target size are nowhere near INT_MAX.
+ auto [topLeftFloor, botRightCeil] = round_out(pathDevBounds);
+ SkASSERT(skvx::all(skvx::cast<float>(int2::Load(&devIBounds->fLeft)) == topLeftFloor));
+ SkASSERT(skvx::all(skvx::cast<float>(int2::Load(&devIBounds->fRight)) == botRightCeil));
+#endif
+
+ int widthInAtlas = devIBounds->width();
+ int heightInAtlas = devIBounds->height();
+ // is_visible() should have guaranteed the path's bounds were non-empty.
+ SkASSERT(widthInAtlas > 0 && heightInAtlas > 0);
+
+ if (SkNextPow2(widthInAtlas) == SkNextPow2(heightInAtlas)) {
+ // Both dimensions go to the same pow2 band in the atlas. Use the larger dimension as height
+ // for more efficient packing.
+ *transposedInAtlas = widthInAtlas > heightInAtlas;
+ } else {
+ // Both dimensions go to different pow2 bands in the atlas. Use the smaller pow2 band for
+ // most efficient packing.
+ *transposedInAtlas = heightInAtlas > widthInAtlas;
+ }
+ if (*transposedInAtlas) {
+ std::swap(heightInAtlas, widthInAtlas);
+ }
+ SkASSERT(widthInAtlas <= (int)fAtlasMaxSize);
+ SkASSERT(heightInAtlas <= kAtlasMaxPathHeight);
+
+ // Check if this path is already in the atlas. This is mainly for clip paths.
+ AtlasPathKey atlasPathKey;
+ if (!path.isVolatile()) {
+ atlasPathKey.set(viewMatrix, path);
+ if (const SkIPoint16* existingLocation = fAtlasPathCache.find(atlasPathKey)) {
+ *locationInAtlas = *existingLocation;
+ return true;
+ }
+ }
+
+ if (fAtlasRenderTasks.empty() ||
+ !fAtlasRenderTasks.back()->addPath(viewMatrix, path, devIBounds->topLeft(), widthInAtlas,
+ heightInAtlas, *transposedInAtlas, locationInAtlas)) {
+ // We either don't have an atlas yet or the current one is full. Try to replace it.
+ GrAtlasRenderTask* currentAtlasTask = (!fAtlasRenderTasks.empty())
+ ? fAtlasRenderTasks.back().get() : nullptr;
+ if (currentAtlasTask &&
+ drawRefsAtlasCallback &&
+ drawRefsAtlasCallback(currentAtlasTask->atlasProxy())) {
+ // The draw already refs the current atlas. Give up. Otherwise the draw would ref two
+ // different atlases and they couldn't share a texture.
+ return false;
+ }
+ // Replace the atlas with a new one.
+ auto dynamicAtlas = std::make_unique<GrDynamicAtlas>(
+ kAtlasAlpha8Type, GrDynamicAtlas::InternalMultisample::kYes,
+ SkISize{fAtlasInitialSize, fAtlasInitialSize}, fAtlasMaxSize,
+ *rContext->priv().caps(), kAtlasAlgorithm);
+ auto newAtlasTask = sk_make_sp<GrAtlasRenderTask>(rContext,
+ sk_make_sp<GrArenas>(),
+ std::move(dynamicAtlas));
+ rContext->priv().drawingManager()->addAtlasTask(newAtlasTask, currentAtlasTask);
+ SkAssertResult(newAtlasTask->addPath(viewMatrix, path, devIBounds->topLeft(), widthInAtlas,
+ heightInAtlas, *transposedInAtlas, locationInAtlas));
+ fAtlasRenderTasks.push_back(std::move(newAtlasTask));
+ fAtlasPathCache.reset();
+ }
+
+ // Remember this path's location in the atlas, in case it gets drawn again.
+ if (!path.isVolatile()) {
+ fAtlasPathCache.set(atlasPathKey, *locationInAtlas);
+ }
+ return true;
+}
+
+// Returns whether the given proxyOwner uses the atlasProxy.
+template<typename T> bool refs_atlas(const T* proxyOwner, const GrSurfaceProxy* atlasProxy) {
+ bool refsAtlas = false;
+ auto checkForAtlasRef = [atlasProxy, &refsAtlas](GrSurfaceProxy* proxy, GrMipmapped) {
+ if (proxy == atlasProxy) {
+ refsAtlas = true;
+ }
+ };
+ if (proxyOwner) {
+ proxyOwner->visitProxies(checkForAtlasRef);
+ }
+ return refsAtlas;
+}
+
+GrPathRenderer::CanDrawPath GrAtlasPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
+#ifdef SK_DEBUG
+ if (!fAtlasRenderTasks.empty()) {
+ // args.fPaint should NEVER reference our current atlas. If it does, it means somebody
+ // intercepted a clip FP meant for a different op and will cause rendering artifacts.
+ const GrSurfaceProxy* atlasProxy = fAtlasRenderTasks.back()->atlasProxy();
+ SkASSERT(!refs_atlas(args.fPaint->getColorFragmentProcessor(), atlasProxy));
+ SkASSERT(!refs_atlas(args.fPaint->getCoverageFragmentProcessor(), atlasProxy));
+ }
+ SkASSERT(!args.fHasUserStencilSettings); // See onGetStencilSupport().
+#endif
+ bool canDrawPath = args.fShape->style().isSimpleFill() &&
+ // The MSAA requirement is a temporary limitation in order to preserve
+ // functionality for refactoring. TODO: Allow kCoverage AA types.
+ args.fAAType == GrAAType::kMSAA &&
+ !args.fShape->style().hasPathEffect() &&
+ !args.fViewMatrix->hasPerspective() &&
+ this->pathFitsInAtlas(args.fViewMatrix->mapRect(args.fShape->bounds()));
+ return canDrawPath ? CanDrawPath::kYes : CanDrawPath::kNo;
+}
+
+static bool is_visible(const SkRect& pathDevBounds, const SkIRect& clipBounds) {
+ float2 pathTopLeft = float2::Load(&pathDevBounds.fLeft);
+ float2 pathBotRight = float2::Load(&pathDevBounds.fRight);
+ // Empty paths are never visible. Phrase this as a NOT of positive logic so we also return false
+ // in the case of NaN.
+ if (!skvx::all(pathTopLeft < pathBotRight)) {
+ return false;
+ }
+ float2 clipTopLeft = skvx::cast<float>(int2::Load(&clipBounds.fLeft));
+ float2 clipBotRight = skvx::cast<float>(int2::Load(&clipBounds.fRight));
+ static_assert(sizeof(clipBounds) == sizeof(clipTopLeft) + sizeof(clipBotRight));
+ return skvx::all(pathTopLeft < clipBotRight) && skvx::all(pathBotRight > clipTopLeft);
+}
+
+bool GrAtlasPathRenderer::onDrawPath(const DrawPathArgs& args) {
+ SkPath path;
+ args.fShape->asPath(&path);
+
+ const SkRect pathDevBounds = args.fViewMatrix->mapRect(args.fShape->bounds());
+ SkASSERT(this->pathFitsInAtlas(pathDevBounds));
+
+ if (!is_visible(pathDevBounds, args.fClip->getConservativeBounds())) {
+ // The path is empty or outside the clip. No mask is needed.
+ if (path.isInverseFillType()) {
+ args.fSurfaceDrawContext->drawPaint(args.fClip, std::move(args.fPaint),
+ *args.fViewMatrix);
+ }
+ return true;
+ }
+
+ SkIRect devIBounds;
+ SkIPoint16 locationInAtlas;
+ bool transposedInAtlas;
+ SkAssertResult(this->addPathToAtlas(args.fContext, *args.fViewMatrix, path, pathDevBounds,
+ &devIBounds, &locationInAtlas, &transposedInAtlas,
+ nullptr/*DrawRefsAtlasCallback -- see onCanDrawPath()*/));
+
+ const SkIRect& fillBounds = args.fShape->inverseFilled()
+ ? (args.fClip
+ ? args.fClip->getConservativeBounds()
+ : args.fSurfaceDrawContext->asSurfaceProxy()->backingStoreBoundsIRect())
+ : devIBounds;
+ const GrCaps& caps = *args.fSurfaceDrawContext->caps();
+ auto op = GrOp::Make<GrDrawAtlasPathOp>(args.fContext,
+ args.fSurfaceDrawContext->arenaAlloc(),
+ fillBounds, *args.fViewMatrix,
+ std::move(args.fPaint), locationInAtlas,
+ devIBounds, transposedInAtlas,
+ fAtlasRenderTasks.back()->readView(caps),
+ args.fShape->inverseFilled());
+ args.fSurfaceDrawContext->addDrawOp(args.fClip, std::move(op));
+ return true;
+}
+
+GrFPResult GrAtlasPathRenderer::makeAtlasClipEffect(GrRecordingContext* rContext,
+ const GrOp* opBeingClipped,
+ std::unique_ptr<GrFragmentProcessor> inputFP,
+ const SkIRect& drawBounds,
+ const SkMatrix& viewMatrix,
+ const SkPath& path) {
+ if (viewMatrix.hasPerspective()) {
+ return GrFPFailure(std::move(inputFP));
+ }
+
+ const SkRect pathDevBounds = viewMatrix.mapRect(path.getBounds());
+ if (!is_visible(pathDevBounds, drawBounds)) {
+ // The path is empty or outside the drawBounds. No mask is needed.
+ return path.isInverseFillType() ? GrFPSuccess(std::move(inputFP))
+ : GrFPFailure(std::move(inputFP));
+ }
+
+ if (!this->pathFitsInAtlas(pathDevBounds)) {
+ // The path is too big.
+ return GrFPFailure(std::move(inputFP));
+ }
+
+ SkIRect devIBounds;
+ SkIPoint16 locationInAtlas;
+ bool transposedInAtlas;
+ // Called if the atlas runs out of room, to determine if it's safe to create a new one. (Draws
+ // can never access more than one atlas.)
+ auto drawRefsAtlasCallback = [opBeingClipped, &inputFP](const GrSurfaceProxy* atlasProxy) {
+ return refs_atlas(opBeingClipped, atlasProxy) ||
+ refs_atlas(inputFP.get(), atlasProxy);
+ };
+ // addPathToAtlas() ignores inverseness of the fill. See GrAtlasRenderTask::getAtlasUberPath().
+ if (!this->addPathToAtlas(rContext, viewMatrix, path, pathDevBounds, &devIBounds,
+ &locationInAtlas, &transposedInAtlas, drawRefsAtlasCallback)) {
+ // The atlas ran out of room and we were unable to start a new one.
+ return GrFPFailure(std::move(inputFP));
+ }
+
+ SkMatrix atlasMatrix;
+ auto [atlasX, atlasY] = locationInAtlas;
+ if (!transposedInAtlas) {
+ atlasMatrix = SkMatrix::Translate(atlasX - devIBounds.left(), atlasY - devIBounds.top());
+ } else {
+ atlasMatrix.setAll(0, 1, atlasX - devIBounds.top(),
+ 1, 0, atlasY - devIBounds.left(),
+ 0, 0, 1);
+ }
+ auto flags = GrModulateAtlasCoverageEffect::Flags::kNone;
+ if (path.isInverseFillType()) {
+ flags |= GrModulateAtlasCoverageEffect::Flags::kInvertCoverage;
+ }
+ if (!devIBounds.contains(drawBounds)) {
+ flags |= GrModulateAtlasCoverageEffect::Flags::kCheckBounds;
+ // At this point in time we expect callers to tighten the scissor for "kIntersect" clips, as
+ // opposed to us having to check the path bounds. Feel free to remove this assert if that
+ // ever changes.
+ SkASSERT(path.isInverseFillType());
+ }
+ GrSurfaceProxyView atlasView = fAtlasRenderTasks.back()->readView(*rContext->priv().caps());
+ return GrFPSuccess(std::make_unique<GrModulateAtlasCoverageEffect>(flags, std::move(inputFP),
+ std::move(atlasView),
+ atlasMatrix, devIBounds));
+}
+
+#ifdef SK_DEBUG
+// Ensures the atlas dependencies are set up such that each atlas will be totally out of service
+// before we render the next one in line. This means there will only ever be one atlas active at a
+// time and that they can all share the same texture.
+static void validate_atlas_dependencies(const SkTArray<sk_sp<GrAtlasRenderTask>>& atlasTasks) {
+ for (int i = atlasTasks.count() - 1; i >= 1; --i) {
+ GrAtlasRenderTask* atlasTask = atlasTasks[i].get();
+ GrAtlasRenderTask* previousAtlasTask = atlasTasks[i - 1].get();
+ // Double check that atlasTask depends on every dependent of its previous atlas. If this
+ // fires it might mean previousAtlasTask gained a new dependent after atlasTask came into
+ // service (maybe by an op that hadn't yet been added to an opsTask when we registered the
+ // new atlas with the drawingManager).
+ for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
+ SkASSERT(atlasTask->dependsOn(previousAtlasUser));
+ }
+ }
+}
+#endif
+
+void GrAtlasPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
+ SkSpan<const uint32_t> /* taskIDs */) {
+ if (fAtlasRenderTasks.empty()) {
+ SkASSERT(fAtlasPathCache.count() == 0);
+ return;
+ }
+
+ // Verify the atlases can all share the same texture.
+ SkDEBUGCODE(validate_atlas_dependencies(fAtlasRenderTasks);)
+
+ // Instantiate the first atlas.
+ fAtlasRenderTasks[0]->instantiate(onFlushRP);
+
+ // Instantiate the remaining atlases.
+ GrTexture* firstAtlasTexture = fAtlasRenderTasks[0]->atlasProxy()->peekTexture();
+ SkASSERT(firstAtlasTexture);
+ for (int i = 1; i < fAtlasRenderTasks.count(); ++i) {
+ GrAtlasRenderTask* atlasTask = fAtlasRenderTasks[i].get();
+ if (atlasTask->atlasProxy()->backingStoreDimensions() == firstAtlasTexture->dimensions()) {
+ atlasTask->instantiate(onFlushRP, sk_ref_sp(firstAtlasTexture));
+ } else {
+ // The atlases are expected to all be full size except possibly the final one.
+ SkASSERT(i == fAtlasRenderTasks.count() - 1);
+ SkASSERT(atlasTask->atlasProxy()->backingStoreDimensions().area() <
+ firstAtlasTexture->dimensions().area());
+ // TODO: Recycle the larger atlas texture anyway?
+ atlasTask->instantiate(onFlushRP);
+ }
+ }
+
+ // Reset all atlas data.
+ fAtlasRenderTasks.reset();
+ fAtlasPathCache.reset();
+}
diff --git a/src/gpu/ops/GrAtlasPathRenderer.h b/src/gpu/ops/GrAtlasPathRenderer.h
new file mode 100644
index 0000000..55eb824
--- /dev/null
+++ b/src/gpu/ops/GrAtlasPathRenderer.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrAtlasPathRenderer_DEFINED
+#define GrAtlasPathRenderer_DEFINED
+
+#include "include/private/SkTHash.h"
+#include "src/core/SkIPoint16.h"
+#include "src/gpu/GrDynamicAtlas.h"
+#include "src/gpu/GrFragmentProcessor.h"
+#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrPathRenderer.h"
+
+class GrAtlasRenderTask;
+class GrOp;
+class GrRecordingContext;
+
+// Draws paths by first rendering their coverage mask into an offscreen atlas.
+class GrAtlasPathRenderer : public GrPathRenderer, public GrOnFlushCallbackObject {
+public:
+ static bool IsSupported(GrRecordingContext*);
+
+ // Returns a GrAtlasPathRenderer if it is supported, otherwise null.
+ static sk_sp<GrAtlasPathRenderer> Make(GrRecordingContext* rContext);
+
+ const char* name() const final { return "GrAtlasPathRenderer"; }
+
+ StencilSupport onGetStencilSupport(const GrStyledShape&) const override {
+ return kNoSupport_StencilSupport;
+ }
+
+ CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
+
+ bool onDrawPath(const DrawPathArgs&) override;
+
+ // Returns a fragment processor that modulates inputFP by the given deviceSpacePath's coverage,
+ // implemented using an internal atlas.
+ //
+ // Returns 'inputFP' wrapped in GrFPFailure() if the path was too large, or if the current atlas
+ // is full and already used by either opBeingClipped or inputFP. (Currently, "too large" means
+ // more than 128*128 total pixels, or larger than the atlas size in either dimension.)
+ //
+ // Also returns GrFPFailure() if the view matrix has perspective.
+ GrFPResult makeAtlasClipEffect(GrRecordingContext*,
+ const GrOp* opBeingClipped,
+ std::unique_ptr<GrFragmentProcessor> inputFP,
+ const SkIRect& drawBounds,
+ const SkMatrix&,
+ const SkPath&);
+
+private:
+ // The atlas is not compatible with DDL. We can only use it on direct contexts.
+ GrAtlasPathRenderer(GrDirectContext*);
+
+ // Returns true if the given device-space path bounds are no larger than 128*128 total pixels
+ // and no larger than the max atlas size in either dimension.
+ bool pathFitsInAtlas(const SkRect& pathDevBounds) const;
+
+ // Returns true if the draw being set up already uses the given atlasProxy.
+ using DrawRefsAtlasCallback = std::function<bool(const GrSurfaceProxy* atlasProxy)>;
+
+ // Adds the filled path to an atlas.
+ //
+ // pathFitsInAtlas() and is_visible() both must have returned true before making this call.
+ //
+ // Fails and returns false if the current atlas is full and already in use according to
+ // DrawRefsAtlasCallback.
+ bool addPathToAtlas(GrRecordingContext*,
+ const SkMatrix&,
+ const SkPath&,
+ const SkRect& pathDevBounds,
+ SkIRect* devIBounds,
+ SkIPoint16* locationInAtlas,
+ bool* transposedInAtlas,
+ const DrawRefsAtlasCallback&);
+
+ // Instantiates texture(s) for all atlases we've created since the last flush. Atlases that are
+ // the same size will be instantiated with the same backing texture.
+ void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> taskIDs) override;
+
+ float fAtlasMaxSize = 0;
+ int fAtlasInitialSize = 0;
+
+ // A collection of all atlases we've created and used since the last flush. We instantiate these
+ // at flush time during preFlush().
+ SkSTArray<4, sk_sp<GrAtlasRenderTask>> fAtlasRenderTasks;
+
+ // This simple cache remembers the locations of cacheable path masks in the most recent atlas.
+ // Its main motivation is for clip paths.
+ struct AtlasPathKey {
+ void set(const SkMatrix&, const SkPath&);
+ bool operator==(const AtlasPathKey& k) const {
+ static_assert(sizeof(*this) == sizeof(uint32_t) * 6);
+ return !memcmp(this, &k, sizeof(*this));
+ }
+ uint32_t fPathGenID;
+ float fAffineMatrix[4];
+ uint8_t fSubpixelPositionKey[2];
+ uint16_t fFillRule;
+ };
+ SkTHashMap<AtlasPathKey, SkIPoint16> fAtlasPathCache;
+};
+
+#endif
diff --git a/src/gpu/ops/GrDrawAtlasPathOp.cpp b/src/gpu/ops/GrDrawAtlasPathOp.cpp
new file mode 100644
index 0000000..a05999c
--- /dev/null
+++ b/src/gpu/ops/GrDrawAtlasPathOp.cpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/ops/GrDrawAtlasPathOp.h"
+
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/GrOpsRenderPass.h"
+#include "src/gpu/GrProgramInfo.h"
+#include "src/gpu/GrResourceProvider.h"
+#include "src/gpu/GrVertexWriter.h"
+#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
+#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
+#include "src/gpu/glsl/GrGLSLVarying.h"
+#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
+
+namespace {
+
+// Instanced geometry processor for GrDrawAtlasPathOp: each instance draws an
+// axis-aligned rectangle ("fillBounds") whose coverage is looked up from an
+// atlas texture via the op's GrAtlasInstancedHelper.
+class DrawAtlasPathShader : public GrGeometryProcessor {
+public:
+ DrawAtlasPathShader(bool usesLocalCoords, const GrAtlasInstancedHelper* atlasHelper,
+ const GrShaderCaps& shaderCaps)
+ : GrGeometryProcessor(kDrawAtlasPathShader_ClassID)
+ , fUsesLocalCoords(usesLocalCoords)
+ , fAtlasHelper(atlasHelper)
+ , fAtlasAccess(GrSamplerState::Filter::kNearest, fAtlasHelper->proxy()->backendFormat(),
+ fAtlasHelper->atlasSwizzle()) {
+ if (!shaderCaps.vertexIDSupport()) {
+ // Without sk_VertexID we must feed the unit-quad corner in as a vertex attrib.
+ constexpr static Attribute kUnitCoordAttrib("unitCoord", kFloat2_GrVertexAttribType,
+ kFloat2_GrSLType);
+ this->setVertexAttributes(&kUnitCoordAttrib, 1);
+ }
+ fAttribs.emplace_back("fillBounds", kFloat4_GrVertexAttribType, kFloat4_GrSLType);
+ if (fUsesLocalCoords) {
+ // The vertex shader inverts this affine map to reconstruct local coords
+ // from device coords.
+ fAttribs.emplace_back("affineMatrix", kFloat4_GrVertexAttribType, kFloat4_GrSLType);
+ fAttribs.emplace_back("translate", kFloat2_GrVertexAttribType, kFloat2_GrSLType);
+ }
+ // "color" must land exactly at colorAttribIdx(); it shifts by two slots when
+ // the local-coord attribs are present.
+ SkASSERT(fAttribs.count() == this->colorAttribIdx());
+ fAttribs.emplace_back("color", kFloat4_GrVertexAttribType, kHalf4_GrSLType);
+ fAtlasHelper->appendInstanceAttribs(&fAttribs);
+ SkASSERT(fAttribs.count() <= kMaxInstanceAttribs);
+ this->setInstanceAttributes(fAttribs.data(), fAttribs.count());
+ this->setTextureSamplerCnt(1);
+ }
+
+private:
+ // Index of the "color" attrib: preceded by fillBounds [+ affineMatrix + translate].
+ int colorAttribIdx() const { return fUsesLocalCoords ? 3 : 1; }
+ const char* name() const override { return "DrawAtlasPathShader"; }
+ void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override {
+ b->addBits(1, fUsesLocalCoords, "localCoords");
+ fAtlasHelper->getKeyBits(b);
+ }
+ const TextureSampler& onTextureSampler(int) const override { return fAtlasAccess; }
+ GrGLSLGeometryProcessor* createGLSLInstance(const GrShaderCaps&) const override;
+
+ const bool fUsesLocalCoords;
+ const GrAtlasInstancedHelper* const fAtlasHelper;
+ TextureSampler fAtlasAccess;
+ // Worst case: fillBounds, affineMatrix, translate, color, plus up to two attribs
+ // appended by GrAtlasInstancedHelper ("locations" and optional "sizeInAtlas").
+ constexpr static int kMaxInstanceAttribs = 6;
+ SkSTArray<kMaxInstanceAttribs, GrGeometryProcessor::Attribute> fAttribs;
+
+ class Impl;
+};
+
+// GLSL implementation of DrawAtlasPathShader.
+class DrawAtlasPathShader::Impl : public GrGLSLGeometryProcessor {
+ void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
+ const auto& shader = args.fGeomProc.cast<DrawAtlasPathShader>();
+ args.fVaryingHandler->emitAttributes(shader);
+
+ if (args.fShaderCaps->vertexIDSupport()) {
+ // Derive the unit-quad corner from sk_VertexID. (When sk_VertexID is not
+ // supported, "unitCoord" already came in as a vertex attrib instead.)
+ args.fVertBuilder->codeAppendf(R"(
+ float2 unitCoord = float2(sk_VertexID & 1, sk_VertexID >> 1);)");
+ }
+
+ // Lerp the unit coord across the instance's fill bounds to place the quad corner
+ // in device space.
+ args.fVertBuilder->codeAppendf(R"(
+ float2 devCoord = mix(fillBounds.xy, fillBounds.zw, unitCoord);)");
+ gpArgs->fPositionVar.set(kFloat2_GrSLType, "devCoord");
+
+ if (shader.fUsesLocalCoords) {
+ // Local coords are recovered by inverting the instance's affine device transform.
+ args.fVertBuilder->codeAppendf(R"(
+ float2x2 M = float2x2(affineMatrix);
+ float2 localCoord = inverse(M) * (devCoord - translate);)");
+ gpArgs->fLocalCoordVar.set(kFloat2_GrSLType, "localCoord");
+ }
+
+ // Start at full coverage; the atlas helper injects code that adjusts it from
+ // the atlas (see GrAtlasInstancedHelper::injectShaderCode).
+ args.fFragBuilder->codeAppendf("half4 %s = half4(1);", args.fOutputCoverage);
+ shader.fAtlasHelper->injectShaderCode(args, gpArgs->fPositionVar, &fAtlasAdjustUniform);
+
+ args.fFragBuilder->codeAppendf("half4 %s;", args.fOutputColor);
+ args.fVaryingHandler->addPassThroughAttribute(
+ shader.fAttribs[shader.colorAttribIdx()], args.fOutputColor,
+ GrGLSLVaryingHandler::Interpolation::kCanBeFlat);
+ }
+
+ void setData(const GrGLSLProgramDataManager& pdman,
+ const GrShaderCaps&,
+ const GrGeometryProcessor& geomProc) override {
+ // Uniform data is owned by the op's atlas helper; forward to it.
+ auto* atlasHelper = geomProc.cast<DrawAtlasPathShader>().fAtlasHelper;
+ atlasHelper->setUniformData(pdman, fAtlasAdjustUniform);
+ }
+
+ GrGLSLUniformHandler::UniformHandle fAtlasAdjustUniform;
+};
+
+// Instantiates this geometry processor's GLSL implementation (Impl).
+GrGLSLGeometryProcessor* DrawAtlasPathShader::createGLSLInstance(const GrShaderCaps&) const {
+ return new Impl();
+}
+
+} // namespace
+
+// Finalizes the processor set against the head instance's color (which may be
+// overwritten in place with an analysis-determined override color), and records
+// whether the analysis requires local coords.
+GrProcessorSet::Analysis GrDrawAtlasPathOp::finalize(const GrCaps& caps, const GrAppliedClip* clip,
+ GrClampType clampType) {
+ const GrProcessorSet::Analysis& analysis = fProcessors.finalize(
+ fHeadInstance->fColor, GrProcessorAnalysisCoverage::kSingleChannel, clip,
+ &GrUserStencilSettings::kUnused, caps, clampType, &fHeadInstance->fColor);
+ fUsesLocalCoords = analysis.usesLocalCoords();
+ return analysis;
+}
+
+// Merges another GrDrawAtlasPathOp into this one when their atlas helpers are
+// compatible and their processor sets are identical.
+GrOp::CombineResult GrDrawAtlasPathOp::onCombineIfPossible(GrOp* op, SkArenaAlloc*, const GrCaps&) {
+ auto* that = op->cast<GrDrawAtlasPathOp>();
+
+ if (!fAtlasHelper.isCompatible(that->fAtlasHelper) ||
+ fProcessors != that->fProcessors) {
+ return CombineResult::kCannotCombine;
+ }
+
+ // Identical processor sets must have produced the same finalize() analysis.
+ SkASSERT(fUsesLocalCoords == that->fUsesLocalCoords);
+ // Splice that op's singly-linked instance list onto the end of ours.
+ *fTailInstance = that->fHeadInstance;
+ fTailInstance = that->fTailInstance;
+ fInstanceCount += that->fInstanceCount;
+ return CombineResult::kMerged;
+}
+
+// Builds this op's GrProgramInfo (pipeline + DrawAtlasPathShader). Called once,
+// either at DDL record time (onPrePrepare) or at flush time (onPrepare).
+// Consumes fProcessors and the applied clip.
+void GrDrawAtlasPathOp::prepareProgram(const GrCaps& caps, SkArenaAlloc* arena,
+ const GrSurfaceProxyView& writeView, bool usesMSAASurface,
+ GrAppliedClip&& appliedClip,
+ const GrDstProxyView& dstProxyView,
+ GrXferBarrierFlags renderPassXferBarriers,
+ GrLoadOp colorLoadOp) {
+ SkASSERT(!fProgram);
+ GrPipeline::InitArgs initArgs;
+ if (usesMSAASurface) {
+ initArgs.fInputFlags |= GrPipeline::InputFlags::kHWAntialias;
+ }
+ initArgs.fCaps = &caps;
+ initArgs.fDstProxyView = dstProxyView;
+ initArgs.fWriteSwizzle = writeView.swizzle();
+ auto pipeline = arena->make<GrPipeline>(initArgs, std::move(fProcessors),
+ std::move(appliedClip));
+ auto shader = arena->make<DrawAtlasPathShader>(fUsesLocalCoords, &fAtlasHelper,
+ *caps.shaderCaps());
+ fProgram = arena->make<GrProgramInfo>(writeView, pipeline, &GrUserStencilSettings::kUnused,
+ shader, GrPrimitiveType::kTriangleStrip, 0,
+ renderPassXferBarriers, colorLoadOp);
+}
+
+// DDL record-time path: build the program now (using the record-time allocator)
+// and register it with the context so it can be compiled ahead of flush.
+void GrDrawAtlasPathOp::onPrePrepare(GrRecordingContext* rContext,
+ const GrSurfaceProxyView& writeView,
+ GrAppliedClip* appliedClip, const GrDstProxyView& dstProxyView,
+ GrXferBarrierFlags renderPassXferBarriers,
+ GrLoadOp colorLoadOp) {
+ // MSAA state is inferred from the proxy's sample count here; see the FIXME in
+ // onPrepare about DMSAA state not being communicated to onPrePrepare.
+ this->prepareProgram(*rContext->priv().caps(), rContext->priv().recordTimeAllocator(),
+ writeView, writeView.asRenderTargetProxy()->numSamples() > 1,
+ std::move(*appliedClip), dstProxyView, renderPassXferBarriers,
+ colorLoadOp);
+ SkASSERT(fProgram);
+ rContext->priv().recordProgramInfo(fProgram);
+}
+
+// Resource-cache key for the shared unit-quad vertex buffer (fallback when
+// sk_VertexID is unavailable).
+GR_DECLARE_STATIC_UNIQUE_KEY(gUnitQuadBufferKey);
+
+// Flush-time setup: build the program if onPrePrepare didn't, upload per-instance
+// data, and (if needed) fetch the static unit-quad vertex buffer.
+void GrDrawAtlasPathOp::onPrepare(GrOpFlushState* flushState) {
+ if (!fProgram) {
+ this->prepareProgram(flushState->caps(), flushState->allocator(), flushState->writeView(),
+ flushState->usesMSAASurface(), flushState->detachAppliedClip(),
+ flushState->dstProxyView(), flushState->renderPassBarriers(),
+ flushState->colorLoadOp());
+ SkASSERT(fProgram);
+ }
+
+ // FIXME(skbug.com/12201): Our draw's MSAA state should match the render target, but DDL doesn't
+ // yet communicate DMSAA state to onPrePrepare.
+ SkASSERT(fProgram->pipeline().isHWAntialiasState() == flushState->usesMSAASurface());
+
+ // Write one instance record per linked-list entry: fillBounds, optional local-coord
+ // matrix, color, then the atlas helper's data. Layout must match the attribs
+ // declared by DrawAtlasPathShader.
+ if (GrVertexWriter instanceWriter = flushState->makeVertexSpace(
+ fProgram->geomProc().instanceStride(), fInstanceCount, &fInstanceBuffer,
+ &fBaseInstance)) {
+ for (const Instance* i = fHeadInstance; i; i = i->fNext) {
+ instanceWriter.write(
+ SkRect::Make(i->fFillBounds),
+ GrVertexWriter::If(fUsesLocalCoords,
+ i->fLocalToDeviceIfUsingLocalCoords),
+ i->fColor);
+ fAtlasHelper.writeInstanceData(&instanceWriter, &i->fAtlasInstance);
+ }
+ }
+
+ if (!flushState->caps().shaderCaps()->vertexIDSupport()) {
+ // Unit-quad corners in triangle-strip order, matching the sk_VertexID derivation
+ // in DrawAtlasPathShader::Impl.
+ constexpr static SkPoint kUnitQuad[4] = {{0,0}, {0,1}, {1,0}, {1,1}};
+
+ GR_DEFINE_STATIC_UNIQUE_KEY(gUnitQuadBufferKey);
+
+ fVertexBufferIfNoIDSupport = flushState->resourceProvider()->findOrMakeStaticBuffer(
+ GrGpuBufferType::kVertex, sizeof(kUnitQuad), kUnitQuad, gUnitQuadBufferKey);
+ }
+}
+
+// Issues the actual draw: one instanced 4-vertex triangle strip covering all
+// accumulated instances, with the atlas bound as the sole texture.
+void GrDrawAtlasPathOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
+ flushState->bindPipelineAndScissorClip(*fProgram, this->bounds());
+ flushState->bindTextures(fProgram->geomProc(), *fAtlasHelper.proxy(), fProgram->pipeline());
+ // Vertex buffer is null when sk_VertexID is supported (corners derived in-shader).
+ flushState->bindBuffers(nullptr, std::move(fInstanceBuffer), fVertexBufferIfNoIDSupport);
+ flushState->drawInstanced(fInstanceCount, fBaseInstance, 4, 0);
+}
diff --git a/src/gpu/ops/GrDrawAtlasPathOp.h b/src/gpu/ops/GrDrawAtlasPathOp.h
new file mode 100644
index 0000000..1e79ec9
--- /dev/null
+++ b/src/gpu/ops/GrDrawAtlasPathOp.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2020 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrDrawAtlasPathOp_DEFINED
+#define GrDrawAtlasPathOp_DEFINED
+
+#include "src/core/SkIPoint16.h"
+#include "src/gpu/ops/GrAtlasInstancedHelper.h"
+#include "src/gpu/ops/GrDrawOp.h"
+
+// Fills a rectangle of pixels with a clip against coverage values from an atlas.
+// Fills a rectangle of pixels with a clip against coverage values from an atlas.
+// Instances accumulated via op-combining are kept in an arena-allocated singly
+// linked list and drawn as one instanced triangle strip.
+class GrDrawAtlasPathOp : public GrDrawOp {
+public:
+ DEFINE_OP_CLASS_ID
+
+ GrDrawAtlasPathOp(SkArenaAlloc* arena, const SkIRect& fillBounds, const SkMatrix& localToDevice,
+ GrPaint&& paint, SkIPoint16 locationInAtlas, const SkIRect& pathDevIBounds,
+ bool transposedInAtlas, GrSurfaceProxyView atlasView, bool isInverseFill)
+ : GrDrawOp(ClassID())
+ , fHeadInstance(arena->make<Instance>(fillBounds, localToDevice, paint.getColor4f(),
+ locationInAtlas, pathDevIBounds,
+ transposedInAtlas))
+ , fTailInstance(&fHeadInstance->fNext)
+ // Inverse fills need the shader to check path bounds and invert the
+ // coverage it reads from the atlas.
+ , fAtlasHelper(std::move(atlasView),
+ isInverseFill ? GrAtlasInstancedHelper::ShaderFlags::kCheckBounds |
+ GrAtlasInstancedHelper::ShaderFlags::kInvertCoverage
+ : GrAtlasInstancedHelper::ShaderFlags::kNone)
+ , fProcessors(std::move(paint)) {
+ this->setBounds(SkRect::Make(fillBounds), HasAABloat::kYes, IsHairline::kNo);
+ }
+
+ const char* name() const override { return "GrDrawAtlasPathOp"; }
+ FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
+ void visitProxies(const GrVisitProxyFunc& func) const override {
+ func(fAtlasHelper.proxy(), GrMipmapped::kNo);
+ fProcessors.visitProxies(func);
+ }
+ GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*, GrClampType) override;
+ CombineResult onCombineIfPossible(GrOp*, SkArenaAlloc*, const GrCaps&) override;
+
+ void onPrePrepare(GrRecordingContext*, const GrSurfaceProxyView& writeView, GrAppliedClip*,
+ const GrDstProxyView&, GrXferBarrierFlags, GrLoadOp colorLoadOp) override;
+ void onPrepare(GrOpFlushState*) override;
+ void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
+
+private:
+ // Shared program-building path for onPrePrepare (DDL) and onPrepare (flush).
+ void prepareProgram(const GrCaps&, SkArenaAlloc*, const GrSurfaceProxyView& writeView,
+ bool usesMSAASurface, GrAppliedClip&&, const GrDstProxyView&,
+ GrXferBarrierFlags, GrLoadOp colorLoadOp);
+
+ // Per-draw record; mirrors the instance attrib layout written in onPrepare.
+ struct Instance {
+ Instance(const SkIRect& fillIBounds, const SkMatrix& m,
+ const SkPMColor4f& color, SkIPoint16 locationInAtlas,
+ const SkIRect& pathDevIBounds, bool transposedInAtlas)
+ : fFillBounds(fillIBounds)
+ // {scaleX, skewY, skewX, scaleY}: column-major 2x2 as consumed by the
+ // shader's float2x2(affineMatrix), followed by the translation.
+ , fLocalToDeviceIfUsingLocalCoords{m.getScaleX(), m.getSkewY(),
+ m.getSkewX(), m.getScaleY(),
+ m.getTranslateX(), m.getTranslateY()}
+ , fColor(color)
+ , fAtlasInstance(locationInAtlas, pathDevIBounds, transposedInAtlas) {
+ }
+ SkIRect fFillBounds;
+ std::array<float, 6> fLocalToDeviceIfUsingLocalCoords;
+ SkPMColor4f fColor;
+ GrAtlasInstancedHelper::Instance fAtlasInstance;
+ Instance* fNext = nullptr;
+ };
+
+ // Singly linked list of instances; tail pointer enables O(1) splicing in
+ // onCombineIfPossible.
+ Instance* fHeadInstance;
+ Instance** fTailInstance;
+
+ GrAtlasInstancedHelper fAtlasHelper;
+ // Set by finalize() from the processor analysis.
+ bool fUsesLocalCoords = false;
+
+ int fInstanceCount = 1;
+
+ // Built in prepareProgram(); arena-owned.
+ GrProgramInfo* fProgram = nullptr;
+
+ sk_sp<const GrBuffer> fInstanceBuffer;
+ int fBaseInstance;
+
+ // Only used if sk_VertexID is not supported.
+ sk_sp<const GrGpuBuffer> fVertexBufferIfNoIDSupport;
+
+ GrProcessorSet fProcessors;
+};
+
+#endif