Rewrite tessellation atlases as normal render tasks
Rewrites tessellation atlases as normal render tasks instead of
"onFlush" tasks. These tasks get inserted into the DAG upfront, lay
out their atlases as dependent tasks get built and reference them, and
finally add their ops to render themselves during onMakeClosed. Doing it
this way allows us to pause the flush and re-render the atlas whenever
it runs out of room.
Bug: b/188794626
Bug: chromium:928984
Change-Id: Id59a5527924c63d5ff7c5bce46a88368e79fc3ef
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/420556
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
Reviewed-by: Adlai Holler <adlai@google.com>
diff --git a/gm/manypathatlases.cpp b/gm/manypathatlases.cpp
new file mode 100644
index 0000000..50271d2
--- /dev/null
+++ b/gm/manypathatlases.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "gm/gm.h"
+
+#include "include/core/SkPath.h"
+#include "include/gpu/GrContextOptions.h"
+#include "include/gpu/GrRecordingContext.h"
+#include "src/gpu/GrDirectContextPriv.h"
+#include "src/gpu/GrDrawingManager.h"
+#include "src/gpu/GrRecordingContextPriv.h"
+#include "src/gpu/GrSurfaceDrawContext.h"
+#include "tools/ToolUtils.h"
+
+namespace skiagm {
+
+/**
+ * This test originally ensured that the ccpr path cache preserved fill rules properly. CCPR is gone
+ * now, but we decided to keep the test.
+ */
+class ManyPathAtlasesGM : public GpuGM {
+private:
+ SkString onShortName() override { return SkString("manypathatlases"); }
+ SkISize onISize() override { return SkISize::Make(128, 128); }
+
+ void modifyGrContextOptions(GrContextOptions* ctxOptions) override {
+ ctxOptions->fMaxTextureAtlasSize = 128; // Put each path in its own atlas.
+ }
+
+ DrawResult onDraw(GrRecordingContext*, GrSurfaceDrawContext*, SkCanvas* canvas,
+ SkString* errorMsg) override {
+ canvas->clear({1,1,0,1});
+ SkPath clip = SkPath().moveTo(-50, 20)
+ .cubicTo(-50, -20, 50, -20, 50, 40)
+ .cubicTo(20, 0, -20, 0, -50, 20);
+ clip.transform(SkMatrix::Translate(64, 70));
+ for (int i = 0; i < 4; ++i) {
+ SkPath rotatedClip = clip;
+ rotatedClip.transform(SkMatrix::RotateDeg(30 * i + 128, {64, 70}));
+ rotatedClip.setIsVolatile(true);
+ canvas->clipPath(rotatedClip, SkClipOp::kDifference, true);
+ }
+ SkPath path = SkPath().moveTo(20, 0)
+ .lineTo(108, 0).cubicTo(108, 20, 108, 20, 128, 20)
+ .lineTo(128, 108).cubicTo(108, 108, 108, 108, 108, 128)
+ .lineTo(20, 128).cubicTo(20, 108, 20, 108, 0, 108)
+ .lineTo(0, 20).cubicTo(20, 20, 20, 20, 20, 0);
+ path.setIsVolatile(true);
+ SkPaint teal;
+ teal.setColor4f({.03f, .91f, .87f, 1});
+ teal.setAntiAlias(true);
+ canvas->drawPath(path, teal);
+ return DrawResult::kOk;
+ }
+};
+
+DEF_GM( return new ManyPathAtlasesGM(); )
+
+} // namespace skiagm
diff --git a/gn/gm.gni b/gn/gm.gni
index 317d72b..debca07 100644
--- a/gn/gm.gni
+++ b/gn/gm.gni
@@ -261,6 +261,7 @@
"$_gm/make_raster_image.cpp",
"$_gm/makecolorspace.cpp",
"$_gm/mandoline.cpp",
+ "$_gm/manypathatlases.cpp",
"$_gm/manypaths.cpp",
"$_gm/matrixconvolution.cpp",
"$_gm/matriximagefilter.cpp",
diff --git a/gn/gpu.gni b/gn/gpu.gni
index fecb19c..667ea6f 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -410,6 +410,8 @@
"$_src/gpu/gradients/generated/GrUnrolledBinaryGradientColorizer.h",
# tessellate
+ "$_src/gpu/tessellate/GrAtlasRenderTask.cpp",
+ "$_src/gpu/tessellate/GrAtlasRenderTask.h",
"$_src/gpu/tessellate/GrCullTest.h",
"$_src/gpu/tessellate/GrDrawAtlasPathOp.cpp",
"$_src/gpu/tessellate/GrDrawAtlasPathOp.h",
diff --git a/include/gpu/GrContextOptions.h b/include/gpu/GrContextOptions.h
index e3bbe06..204277a 100644
--- a/include/gpu/GrContextOptions.h
+++ b/include/gpu/GrContextOptions.h
@@ -345,6 +345,11 @@
* If true, then always try to use hardware tessellation, regardless of how small a path may be.
*/
bool fAlwaysPreferHardwareTessellation = false;
+
+ /**
+ * Maximum width and height of internal texture atlases.
+ */
+ int fMaxTextureAtlasSize = 2048;
#endif
GrDriverBugWorkarounds fDriverBugWorkarounds;
diff --git a/src/gpu/GrClipStack.cpp b/src/gpu/GrClipStack.cpp
index f15373a..8f92852 100644
--- a/src/gpu/GrClipStack.cpp
+++ b/src/gpu/GrClipStack.cpp
@@ -234,11 +234,12 @@
// TODO: Currently this only works with tessellation because the tessellation path renderer owns and
// manages the atlas. The high-level concept could be generalized to support any path renderer going
// into a shared atlas.
-static GrFPResult clip_atlas_fp(GrTessellationPathRenderer* tessellator,
+static GrFPResult clip_atlas_fp(GrRecordingContext* rContext,
+ const GrOp* opBeingClipped,
+ GrTessellationPathRenderer* tessellator,
const SkIRect& scissorBounds,
const GrClipStack::Element& e,
- std::unique_ptr<GrFragmentProcessor> inputFP,
- const GrCaps& caps) {
+ std::unique_ptr<GrFragmentProcessor> inputFP) {
SkPath path;
e.fShape.asPath(&path);
SkASSERT(!path.isInverseFillType());
@@ -246,8 +247,8 @@
// Toggling fill type does not affect the path's "generationID" key.
path.toggleInverseFillType();
}
- return tessellator->makeAtlasClipFP(scissorBounds, e.fLocalToDevice, path, e.fAA,
- std::move(inputFP), caps);
+ return tessellator->makeAtlasClipFP(rContext, opBeingClipped, std::move(inputFP), scissorBounds,
+ e.fLocalToDevice, path, e.fAA);
}
static void draw_to_sw_mask(GrSWMaskHelper* helper, const GrClipStack::Element& e, bool clearMask) {
@@ -1388,9 +1389,10 @@
*caps->shaderCaps(),
std::move(clipFP));
if (!fullyApplied && tessellator) {
- std::tie(fullyApplied, clipFP) = clip_atlas_fp(tessellator, scissorBounds,
+ std::tie(fullyApplied, clipFP) = clip_atlas_fp(context, opBeingClipped ,
+ tessellator, scissorBounds,
e.asElement(),
- std::move(clipFP), *caps);
+ std::move(clipFP));
}
if (fullyApplied) {
remainingAnalyticFPs--;
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 85cea19..6c71c4d 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -657,7 +657,8 @@
// activeTask does.
bool isActiveResolveTask =
fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG[i].get();
- SkASSERT(isActiveResolveTask || fDAG[i]->isClosed());
+ bool isAtlas = fDAG[i]->isSetFlag(GrRenderTask::kAtlas_Flag);
+ SkASSERT(isActiveResolveTask || isAtlas || fDAG[i]->isClosed());
}
}
@@ -704,6 +705,32 @@
return opsTask;
}
+void GrDrawingManager::addAtlasTask(sk_sp<GrRenderTask> atlasTask,
+ GrRenderTask* previousAtlasTask) {
+ SkDEBUGCODE(this->validate());
+ SkASSERT(fContext);
+
+ if (previousAtlasTask) {
+ previousAtlasTask->makeClosed(fContext);
+ for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
+ // Make the new atlas depend on everybody who used the old atlas, and close their tasks.
+ // This guarantees that the previous atlas is totally out of service before we render
+ // the next one, meaning there is only ever one atlas active at a time and that they can
+ // all share the same texture.
+ atlasTask->addDependency(previousAtlasUser);
+ previousAtlasUser->makeClosed(fContext);
+ if (previousAtlasUser == fActiveOpsTask) {
+ fActiveOpsTask = nullptr;
+ }
+ }
+ }
+
+ atlasTask->setFlag(GrRenderTask::kAtlas_Flag);
+ this->insertTaskBeforeLast(std::move(atlasTask));
+
+ SkDEBUGCODE(this->validate());
+}
+
GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) {
// Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
// in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index 9bfb8f4..7e39505 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -55,6 +55,12 @@
sk_sp<GrArenas> arenas,
bool flushTimeOpsTask);
+ // Adds 'atlasTask' to the DAG and leaves it open.
+ //
+ // If 'previousAtlasTask' is provided, closes it and configures dependencies to guarantee
+ // previousAtlasTask and all its users are completely out of service before atlasTask executes.
+ void addAtlasTask(sk_sp<GrRenderTask> atlasTask, GrRenderTask* previousAtlasTask);
+
// Create a render task that can resolve MSAA and/or regenerate mipmap levels on proxies. This
// method will only add the new render task to the list. It is up to the caller to call
// addProxy() on the returned object.
diff --git a/src/gpu/GrDynamicAtlas.cpp b/src/gpu/GrDynamicAtlas.cpp
index ea44c8c..1b4cf24 100644
--- a/src/gpu/GrDynamicAtlas.cpp
+++ b/src/gpu/GrDynamicAtlas.cpp
@@ -91,8 +91,8 @@
[this](GrResourceProvider* resourceProvider, const LazyAtlasDesc& desc) {
if (!fBackingTexture) {
fBackingTexture = resourceProvider->createTexture(
- {fWidth, fHeight}, desc.fFormat, desc.fRenderable, desc.fSampleCnt,
- desc.fMipmapped, desc.fBudgeted, desc.fProtected);
+ fTextureProxy->backingStoreDimensions(), desc.fFormat, desc.fRenderable,
+ desc.fSampleCnt, desc.fMipmapped, desc.fBudgeted, desc.fProtected);
}
return GrSurfaceProxy::LazyCallbackResult(fBackingTexture);
},
@@ -109,11 +109,16 @@
return fNodeAllocator.make<Node>(previous, rectanizer, l, t);
}
-GrSurfaceProxyView GrDynamicAtlas::surfaceProxyView(const GrCaps& caps) const {
+GrSurfaceProxyView GrDynamicAtlas::readView(const GrCaps& caps) const {
return {fTextureProxy, kTextureOrigin,
caps.getReadSwizzle(fTextureProxy->backendFormat(), fColorType)};
}
+GrSurfaceProxyView GrDynamicAtlas::writeView(const GrCaps& caps) const {
+ return {fTextureProxy, kTextureOrigin,
+ caps.getWriteSwizzle(fTextureProxy->backendFormat(), fColorType)};
+}
+
bool GrDynamicAtlas::addRect(int width, int height, SkIPoint16* location) {
// This can't be called anymore once instantiate() has been called.
SkASSERT(!this->isInstantiated());
@@ -171,17 +176,20 @@
return true;
}
-std::unique_ptr<GrSurfaceDrawContext> GrDynamicAtlas::instantiate(
- GrOnFlushResourceProvider* onFlushRP, sk_sp<GrTexture> backingTexture) {
+void GrDynamicAtlas::instantiate(GrOnFlushResourceProvider* onFlushRP,
+ sk_sp<GrTexture> backingTexture) {
SkASSERT(!this->isInstantiated()); // This method should only be called once.
// Caller should have cropped any paths to the destination render target instead of asking for
// an atlas larger than maxRenderTargetSize.
SkASSERT(std::max(fHeight, fWidth) <= fMaxAtlasSize);
SkASSERT(fMaxAtlasSize <= onFlushRP->caps()->maxRenderTargetSize());
- // Finalize the content size of our proxy. The GPU can potentially make optimizations if it
- // knows we only intend to write out a smaller sub-rectangle of the backing texture.
- fTextureProxy->priv().setLazyDimensions(fDrawBounds);
+ if (fTextureProxy->isFullyLazy()) {
+ // Finalize the content size of our proxy. The GPU can potentially make optimizations if it
+ // knows we only intend to write out a smaller sub-rectangle of the backing texture.
+ fTextureProxy->priv().setLazyDimensions(fDrawBounds);
+ }
+ SkASSERT(fTextureProxy->dimensions() == fDrawBounds);
if (backingTexture) {
#ifdef SK_DEBUG
@@ -189,21 +197,9 @@
SkASSERT(backingRT);
SkASSERT(backingRT->backendFormat() == fTextureProxy->backendFormat());
SkASSERT(backingRT->numSamples() == fTextureProxy->asRenderTargetProxy()->numSamples());
- SkASSERT(backingRT->width() == fWidth);
- SkASSERT(backingRT->height() == fHeight);
+ SkASSERT(backingRT->dimensions() == fTextureProxy->backingStoreDimensions());
#endif
fBackingTexture = std::move(backingTexture);
}
- auto sdc = onFlushRP->makeSurfaceDrawContext(fTextureProxy, kTextureOrigin, fColorType,
- nullptr, SkSurfaceProps());
- if (!sdc) {
- onFlushRP->printWarningMessage(SkStringPrintf(
- "WARNING: failed to allocate a %ix%i atlas. Some masks will not be drawn.\n",
- fWidth, fHeight).c_str());
- return nullptr;
- }
-
- SkIRect clearRect = SkIRect::MakeSize(fDrawBounds);
- sdc->clearAtLeast(clearRect, SK_PMColor4fTRANSPARENT);
- return sdc;
+ onFlushRP->instatiateProxy(fTextureProxy.get());
}
diff --git a/src/gpu/GrDynamicAtlas.h b/src/gpu/GrDynamicAtlas.h
index 38ba3c7..c82bae1 100644
--- a/src/gpu/GrDynamicAtlas.h
+++ b/src/gpu/GrDynamicAtlas.h
@@ -54,27 +54,25 @@
void reset(SkISize initialSize, const GrCaps& desc);
+ GrColorType colorType() const { return fColorType; }
int maxAtlasSize() const { return fMaxAtlasSize; }
GrTextureProxy* textureProxy() const { return fTextureProxy.get(); }
- GrSurfaceProxyView surfaceProxyView(const GrCaps&) const;
+ GrSurfaceProxyView readView(const GrCaps&) const;
+ GrSurfaceProxyView writeView(const GrCaps&) const;
bool isInstantiated() const { return fTextureProxy->isInstantiated(); }
- int currentWidth() const { return fWidth; }
- int currentHeight() const { return fHeight; }
// Attempts to add a rect to the atlas. Returns true if successful, along with the rect's
// top-left location in the atlas.
bool addRect(int width, int height, SkIPoint16* location);
const SkISize& drawBounds() { return fDrawBounds; }
- // Instantiates our texture proxy for the atlas and returns a pre-cleared GrSurfaceDrawContext
- // that the caller may use to render the content. After this call, it is no longer valid to call
+ // Instantiates our texture proxy for the atlas. After this call, it is no longer valid to call
// addRect(), setUserBatchID(), or this method again.
//
// 'backingTexture', if provided, is a renderable texture with which to instantiate our proxy.
// If null then we will create a texture using the resource provider. The purpose of this param
- // is to provide a guaranteed way to recycle a stashed atlas texture from a previous flush.
- std::unique_ptr<GrSurfaceDrawContext> instantiate(
- GrOnFlushResourceProvider*, sk_sp<GrTexture> backingTexture = nullptr);
+ // is to provide a guaranteed way to recycle textures from previous atlases.
+ void instantiate(GrOnFlushResourceProvider*, sk_sp<GrTexture> backingTexture = nullptr);
private:
class Node;
diff --git a/src/gpu/GrOpsTask.cpp b/src/gpu/GrOpsTask.cpp
index 2d47502..5f2884c 100644
--- a/src/gpu/GrOpsTask.cpp
+++ b/src/gpu/GrOpsTask.cpp
@@ -382,7 +382,8 @@
op->visitProxies(addDependency);
- this->recordOp(std::move(op), GrProcessorSet::EmptySetAnalysis(), nullptr, nullptr, caps);
+ this->recordOp(std::move(op), false/*usesMSAA*/, GrProcessorSet::EmptySetAnalysis(), nullptr,
+ nullptr, caps);
}
void GrOpsTask::addDrawOp(GrDrawingManager* drawingMgr, GrOp::Owner op, bool usesMSAA,
@@ -412,16 +413,7 @@
fRenderPassXferBarriers |= GrXferBarrierFlags::kBlend;
}
-#ifdef SK_DEBUG
- // Ensure we can support dynamic msaa if the caller is trying to trigger it.
- GrRenderTargetProxy* rtProxy = this->target(0)->asRenderTargetProxy();
- if (rtProxy->numSamples() == 1 && usesMSAA) {
- SkASSERT(caps.supportsDynamicMSAA(rtProxy));
- }
-#endif
- fUsesMSAASurface |= usesMSAA;
-
- this->recordOp(std::move(op), processorAnalysis, clip.doesClip() ? &clip : nullptr,
+ this->recordOp(std::move(op), usesMSAA, processorAnalysis, clip.doesClip() ? &clip : nullptr,
&dstProxyView, caps);
}
@@ -947,19 +939,27 @@
}
void GrOpsTask::recordOp(
- GrOp::Owner op, GrProcessorSet::Analysis processorAnalysis, GrAppliedClip* clip,
- const GrDstProxyView* dstProxyView, const GrCaps& caps) {
- SkDEBUGCODE(op->validate();)
- SkASSERT(processorAnalysis.requiresDstTexture() == (dstProxyView && dstProxyView->proxy()));
+ GrOp::Owner op, bool usesMSAA, GrProcessorSet::Analysis processorAnalysis,
+ GrAppliedClip* clip, const GrDstProxyView* dstProxyView, const GrCaps& caps) {
GrSurfaceProxy* proxy = this->target(0);
+#ifdef SK_DEBUG
+ op->validate();
+ SkASSERT(processorAnalysis.requiresDstTexture() == (dstProxyView && dstProxyView->proxy()));
SkASSERT(proxy);
-
// A closed GrOpsTask should never receive new/more ops
SkASSERT(!this->isClosed());
+ // Ensure we can support dynamic msaa if the caller is trying to trigger it.
+ if (proxy->asRenderTargetProxy()->numSamples() == 1 && usesMSAA) {
+ SkASSERT(caps.supportsDynamicMSAA(proxy->asRenderTargetProxy()));
+ }
+#endif
+
if (!op->bounds().isFinite()) {
return;
}
+ fUsesMSAASurface |= usesMSAA;
+
// Account for this op's bounds before we attempt to combine.
// NOTE: The caller should have already called "op->setClippedBounds()" by now, if applicable.
fTotalBounds.join(op->bounds());
diff --git a/src/gpu/GrOpsTask.h b/src/gpu/GrOpsTask.h
index 39a58a6..d469e15 100644
--- a/src/gpu/GrOpsTask.h
+++ b/src/gpu/GrOpsTask.h
@@ -106,18 +106,7 @@
const GrOp* getChain(int index) const { return fOpChains[index].head(); }
#endif
-private:
- bool isNoOp() const {
- // TODO: GrLoadOp::kDiscard (i.e., storing a discard) should also be grounds for skipping
- // execution. We currently don't because of Vulkan. See http://skbug.com/9373.
- //
- // TODO: We should also consider stencil load/store here. We get away with it for now
- // because we never discard stencil buffers.
- return fOpChains.empty() && GrLoadOp::kLoad == fColorLoadOp;
- }
-
- void deleteOps();
-
+protected:
enum class StencilContent {
kDontCare,
kUserBitsCleared, // User bits: cleared
@@ -138,6 +127,23 @@
fInitialStencilContent = initialContent;
}
+ void recordOp(GrOp::Owner, bool usesMSAA, GrProcessorSet::Analysis, GrAppliedClip*,
+ const GrDstProxyView*, const GrCaps&);
+
+ ExpectedOutcome onMakeClosed(GrRecordingContext*, SkIRect* targetUpdateBounds) override;
+
+private:
+ bool isNoOp() const {
+ // TODO: GrLoadOp::kDiscard (i.e., storing a discard) should also be grounds for skipping
+ // execution. We currently don't because of Vulkan. See http://skbug.com/9373.
+ //
+ // TODO: We should also consider stencil load/store here. We get away with it for now
+ // because we never discard stencil buffers.
+ return fOpChains.empty() && GrLoadOp::kLoad == fColorLoadOp;
+ }
+
+ void deleteOps();
+
// If a surfaceDrawContext splits its opsTask, it uses this method to guarantee stencil values
// get preserved across its split tasks.
void setMustPreserveStencil() { fMustPreserveStencil = true; }
@@ -226,13 +232,8 @@
void gatherProxyIntervals(GrResourceAllocator*) const override;
- void recordOp(GrOp::Owner, GrProcessorSet::Analysis, GrAppliedClip*,
- const GrDstProxyView*, const GrCaps&);
-
void forwardCombine(const GrCaps&);
- ExpectedOutcome onMakeClosed(GrRecordingContext*, SkIRect* targetUpdateBounds) override;
-
// Remove all ops, proxies, etc. Used in the merging algorithm when tasks can be skipped.
void reset();
diff --git a/src/gpu/GrRenderTask.cpp b/src/gpu/GrRenderTask.cpp
index 1c15d66..22a7604 100644
--- a/src/gpu/GrRenderTask.cpp
+++ b/src/gpu/GrRenderTask.cpp
@@ -149,10 +149,12 @@
return; // don't add duplicate dependencies
}
- // We are closing 'dependedOnTask' here bc the current contents of it are what 'this'
- // renderTask depends on. We need a break in 'dependedOnTask' so that the usage of
- // that state has a chance to execute.
- dependedOnTask->makeClosed(drawingMgr->getContext());
+ if (!dependedOnTask->isSetFlag(kAtlas_Flag)) {
+ // We are closing 'dependedOnTask' here bc the current contents of it are what 'this'
+ // renderTask depends on. We need a break in 'dependedOnTask' so that the usage of
+ // that state has a chance to execute.
+ dependedOnTask->makeClosed(drawingMgr->getContext());
+ }
}
auto resolveFlags = GrSurfaceProxy::ResolveFlags::kNone;
diff --git a/src/gpu/GrRenderTask.h b/src/gpu/GrRenderTask.h
index 972c63a..e39245a 100644
--- a/src/gpu/GrRenderTask.h
+++ b/src/gpu/GrRenderTask.h
@@ -180,9 +180,10 @@
kClosed_Flag = 0x01, //!< This task can't accept any more dependencies.
kDisowned_Flag = 0x02, //!< This task is disowned by its creating GrDrawingManager.
kSkippable_Flag = 0x04, //!< This task is skippable.
 kAtlas_Flag = 0x08, //!< This task is an atlas.
- kWasOutput_Flag = 0x08, //!< Flag for topological sorting
- kTempMark_Flag = 0x10, //!< Flag for topological sorting
+ kWasOutput_Flag = 0x10, //!< Flag for topological sorting
+ kTempMark_Flag = 0x20, //!< Flag for topological sorting
};
void setFlag(uint32_t flag) {
@@ -199,13 +200,13 @@
void setIndex(uint32_t index) {
SkASSERT(!this->isSetFlag(kWasOutput_Flag));
- SkASSERT(index < (1 << 27));
- fFlags |= index << 5;
+ SkASSERT(index < (1 << 26));
+ fFlags |= index << 6;
}
uint32_t getIndex() const {
SkASSERT(this->isSetFlag(kWasOutput_Flag));
- return fFlags >> 5;
+ return fFlags >> 6;
}
private:
diff --git a/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp b/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp
index 5c29d4f..b3a2932 100644
--- a/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp
+++ b/src/gpu/ops/GrSimpleMeshDrawOpHelper.cpp
@@ -88,7 +88,7 @@
if (fProcessors) {
GrProcessorAnalysisCoverage coverage = geometryCoverage;
if (GrProcessorAnalysisCoverage::kNone == coverage) {
- coverage = clip->hasCoverageFragmentProcessor()
+ coverage = (clip && clip->hasCoverageFragmentProcessor())
? GrProcessorAnalysisCoverage::kSingleChannel
: GrProcessorAnalysisCoverage::kNone;
}
diff --git a/src/gpu/tessellate/GrAtlasRenderTask.cpp b/src/gpu/tessellate/GrAtlasRenderTask.cpp
new file mode 100644
index 0000000..79c0b8b
--- /dev/null
+++ b/src/gpu/tessellate/GrAtlasRenderTask.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/tessellate/GrAtlasRenderTask.h"
+
+#include "src/core/SkBlendModePriv.h"
+#include "src/core/SkIPoint16.h"
+#include "src/gpu/GrGpu.h"
+#include "src/gpu/GrOpFlushState.h"
+#include "src/gpu/ops/GrFillRectOp.h"
+#include "src/gpu/tessellate/GrPathStencilCoverOp.h"
+
+GrAtlasRenderTask::GrAtlasRenderTask(GrRecordingContext* rContext, GrAuditTrail* auditTrail,
+ sk_sp<GrArenas> arenas,
+ std::unique_ptr<GrDynamicAtlas> dynamicAtlas)
+ : GrOpsTask(rContext->priv().drawingManager(),
+ dynamicAtlas->writeView(*rContext->priv().caps()), auditTrail,
+ std::move(arenas))
+ , fDynamicAtlas(std::move(dynamicAtlas)) {
+}
+
+bool GrAtlasRenderTask::addPath(const SkMatrix& viewMatrix, const SkPath& path, bool antialias,
+ SkIPoint pathDevTopLeft, int widthInAtlas, int heightInAtlas,
+ bool transposedInAtlas, SkIPoint16* locationInAtlas) {
+ SkASSERT(!this->isClosed());
+ SkASSERT(this->isEmpty());
+ SkASSERT(!fDynamicAtlas->isInstantiated()); // Paths can't be added after instantiate().
+
+ if (!fDynamicAtlas->addRect(widthInAtlas, heightInAtlas, locationInAtlas)) {
+ return false;
+ }
+
+ SkMatrix pathToAtlasMatrix = viewMatrix;
+ if (transposedInAtlas) {
+ std::swap(pathToAtlasMatrix[0], pathToAtlasMatrix[3]);
+ std::swap(pathToAtlasMatrix[1], pathToAtlasMatrix[4]);
+ float tx=pathToAtlasMatrix.getTranslateX(), ty=pathToAtlasMatrix.getTranslateY();
+ pathToAtlasMatrix.setTranslateX(ty - pathDevTopLeft.y() + locationInAtlas->x());
+ pathToAtlasMatrix.setTranslateY(tx - pathDevTopLeft.x() + locationInAtlas->y());
+ } else {
+ pathToAtlasMatrix.postTranslate(locationInAtlas->x() - pathDevTopLeft.x(),
+ locationInAtlas->y() - pathDevTopLeft.y());
+ }
+
+ // Concatenate this path onto our uber path that matches its fill and AA types.
+ SkPath* uberPath = this->getUberPath(path.getFillType(), antialias);
+ uberPath->moveTo(locationInAtlas->x(), locationInAtlas->y()); // Implicit moveTo(0,0).
+ uberPath->addPath(path, pathToAtlasMatrix);
+ return true;
+}
+
+GrRenderTask::ExpectedOutcome GrAtlasRenderTask::onMakeClosed(GrRecordingContext* rContext,
+ SkIRect* targetUpdateBounds) {
+ // We don't add our ops until now, at which point we know the atlas is done being built.
+ SkASSERT(this->isEmpty());
+ SkASSERT(!fDynamicAtlas->isInstantiated()); // Instantiation happens after makeClosed().
+
+ const GrCaps& caps = *rContext->priv().caps();
+
+ // Set our dimensions now. GrOpsTask will need them when we add our ops.
+ this->target(0)->priv().setLazyDimensions(fDynamicAtlas->drawBounds());
+ this->target(0)->asRenderTargetProxy()->setNeedsStencil();
+ SkRect drawRect = target(0)->getBoundsRect();
+
+ // Clear the atlas.
+ if (caps.performColorClearsAsDraws() || caps.performStencilClearsAsDraws()) {
+ this->setColorLoadOp(GrLoadOp::kDiscard);
+ this->setInitialStencilContent(GrOpsTask::StencilContent::kDontCare);
+
+ constexpr static GrUserStencilSettings kClearStencil(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kAlways,
+ 0xffff,
+ GrUserStencilOp::kReplace,
+ GrUserStencilOp::kReplace,
+ 0xffff>());
+
+ this->stencilAtlasRect(rContext, drawRect, SK_PMColor4fTRANSPARENT, &kClearStencil);
+ } else {
+ this->setColorLoadOp(GrLoadOp::kClear);
+ this->setInitialStencilContent(GrOpsTask::StencilContent::kUserBitsCleared);
+ }
+
+ // Add ops to stencil the atlas paths.
+ for (auto antialias : {false, true}) {
+ for (auto fillType : {SkPathFillType::kWinding, SkPathFillType::kEvenOdd}) {
+ SkPath* uberPath = this->getUberPath(fillType, antialias);
+ if (uberPath->isEmpty()) {
+ continue;
+ }
+ uberPath->setFillType(fillType);
+ GrAAType aaType = (antialias) ? GrAAType::kMSAA : GrAAType::kNone;
+ auto op = GrOp::Make<GrPathStencilCoverOp>(
+ rContext, SkMatrix::I(), *uberPath, GrPaint(), aaType,
+ GrTessellationPathRenderer::PathFlags::kStencilOnly, drawRect);
+ this->addAtlasDrawOp(std::move(op), antialias, caps);
+ }
+ }
+
+ // Finally, draw a fullscreen rect to cover our stencilled paths.
+ const GrUserStencilSettings* stencil;
+ if (caps.discardStencilValuesAfterRenderPass()) {
+ constexpr static GrUserStencilSettings kTestStencil(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kKeep,
+ GrUserStencilOp::kKeep,
+ 0xffff>());
+
+ // This is the final op in the task. Since Ganesh is planning to discard the stencil values
+ // anyway, there is no need to reset the stencil values back to 0.
+ stencil = &kTestStencil;
+ } else {
+ constexpr static GrUserStencilSettings kTestAndResetStencil(
+ GrUserStencilSettings::StaticInit<
+ 0x0000,
+ GrUserStencilTest::kNotEqual,
+ 0xffff,
+ GrUserStencilOp::kZero,
+ GrUserStencilOp::kKeep,
+ 0xffff>());
+
+ // Outset the cover rect to make extra sure we clear every stencil value touched by the
+ // atlas.
+ drawRect.outset(1, 1);
+ stencil = &kTestAndResetStencil;
+ }
+ this->stencilAtlasRect(rContext, drawRect, SK_PMColor4fWHITE, stencil);
+
+ this->GrOpsTask::onMakeClosed(rContext, targetUpdateBounds);
+
+ // Don't mark msaa dirty. Since this op defers being closed, the drawing manager's dirty
+ // tracking doesn't work anyway. We will just resolve msaa manually during onExecute.
+ return ExpectedOutcome::kTargetUnchanged;
+}
+
+void GrAtlasRenderTask::stencilAtlasRect(GrRecordingContext* rContext, const SkRect& rect,
+ const SkPMColor4f& color,
+ const GrUserStencilSettings* stencil) {
+ GrPaint paint;
+ paint.setColor4f(color);
+ paint.setXPFactory(SkBlendMode_AsXPFactory(SkBlendMode::kSrc));
+ GrQuad quad(rect);
+ DrawQuad drawQuad{quad, quad, GrQuadAAFlags::kAll};
+ auto op = GrFillRectOp::Make(rContext, std::move(paint), GrAAType::kMSAA, &drawQuad, stencil);
+ this->addAtlasDrawOp(std::move(op), true/*usesMSAA*/, *rContext->priv().caps());
+}
+
+void GrAtlasRenderTask::addAtlasDrawOp(GrOp::Owner op, bool usesMSAA, const GrCaps& caps) {
+ SkASSERT(!this->isClosed());
+
+ auto drawOp = static_cast<GrDrawOp*>(op.get());
+ SkDEBUGCODE(drawOp->fAddDrawOpCalled = true;)
+
+ auto processorAnalysis = drawOp->finalize(caps, nullptr,
+ GrColorTypeClampType(fDynamicAtlas->colorType()));
+ SkASSERT(!processorAnalysis.requiresDstTexture());
+ SkASSERT(!processorAnalysis.usesNonCoherentHWBlending());
+
+ drawOp->setClippedBounds(drawOp->bounds());
+ this->recordOp(std::move(op), usesMSAA, processorAnalysis, nullptr, nullptr, caps);
+}
+
+bool GrAtlasRenderTask::onExecute(GrOpFlushState* flushState) {
+ if (!this->GrOpsTask::onExecute(flushState)) {
+ return false;
+ }
+ if (this->target(0)->requiresManualMSAAResolve()) {
+ // Since atlases don't get closed until they are done being built, the drawingManager
+ // doesn't detect that they need an MSAA resolve. Do it here manually.
+ auto nativeRect = GrNativeRect::MakeIRectRelativeTo(
+ GrDynamicAtlas::kTextureOrigin,
+ this->target(0)->backingStoreDimensions().height(),
+ SkIRect::MakeSize(fDynamicAtlas->drawBounds()));
+ flushState->gpu()->resolveRenderTarget(this->target(0)->peekRenderTarget(), nativeRect);
+ }
+ return true;
+}
diff --git a/src/gpu/tessellate/GrAtlasRenderTask.h b/src/gpu/tessellate/GrAtlasRenderTask.h
new file mode 100644
index 0000000..8215ec4
--- /dev/null
+++ b/src/gpu/tessellate/GrAtlasRenderTask.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrGrAtlasRenderTask_DEFINED
+#define GrGrAtlasRenderTask_DEFINED
+
+#include "include/core/SkPath.h"
+#include "src/gpu/GrDynamicAtlas.h"
+#include "src/gpu/GrOpsTask.h"
+
+struct SkIPoint16;
+
+// Represents a GrRenderTask that draws paths into an atlas. This task gets added to the DAG and left
+// open, lays out its atlas while future tasks call addPath(), and finally adds its internal draw
+// ops during onMakeClosed().
+//
+// The atlas texture does not get instantiated automatically. It is the creator's responsibility to
+// call instantiate() at flush time.
+class GrAtlasRenderTask : public GrOpsTask {
+public:
+ GrAtlasRenderTask(GrRecordingContext*, GrAuditTrail*, sk_sp<GrArenas>,
+ std::unique_ptr<GrDynamicAtlas>);
+
+ const GrTextureProxy* atlasProxy() const { return fDynamicAtlas->textureProxy(); }
+ GrSurfaceProxyView readView(const GrCaps& caps) const { return fDynamicAtlas->readView(caps); }
+
+ // Allocates a rectangle for, and stages the given path to be rendered into the atlas. Returns
+ // false if there was not room in the atlas. On success, writes out the location of the path's
+ // upper-left corner to 'locationInAtlas'.
+ bool addPath(const SkMatrix&, const SkPath&, bool antialias, SkIPoint pathDevTopLeft,
+ int widthInAtlas, int heightInAtlas, bool transposedInAtlas,
+ SkIPoint16* locationInAtlas);
+
+ // Must be called at flush time. The texture proxy is instantiated with 'backingTexture', if
+ // provided. See GrDynamicAtlas.
+ void instantiate(GrOnFlushResourceProvider* onFlushRP,
+ sk_sp<GrTexture> backingTexture = nullptr) {
+ SkASSERT(this->isClosed());
+ fDynamicAtlas->instantiate(onFlushRP, std::move(backingTexture));
+ }
+
+private:
+ // Adds internal ops to render the atlas before deferring to GrOpsTask::onMakeClosed.
+ ExpectedOutcome onMakeClosed(GrRecordingContext*, SkIRect* targetUpdateBounds) override;
+
+ void stencilAtlasRect(GrRecordingContext*, const SkRect&, const SkPMColor4f&,
+ const GrUserStencilSettings*);
+ void addAtlasDrawOp(GrOp::Owner, bool usesMSAA, const GrCaps&);
+
+ // Executes the GrOpsTask and resolves msaa if needed.
+ bool onExecute(GrOpFlushState* flushState) override;
+
+ SkPath* getUberPath(SkPathFillType fillType, bool antialias) {
+ int idx = (int)antialias << 1;
+ idx |= (int)fillType & 1;
+ return &fUberPaths[idx];
+ }
+
+ const std::unique_ptr<GrDynamicAtlas> fDynamicAtlas;
+ SkPath fUberPaths[4]; // 2 fillTypes * 2 antialias modes.
+};
+
+#endif
diff --git a/src/gpu/tessellate/GrTessellationPathRenderer.cpp b/src/gpu/tessellate/GrTessellationPathRenderer.cpp
index 85f2aa0..846a59f 100644
--- a/src/gpu/tessellate/GrTessellationPathRenderer.cpp
+++ b/src/gpu/tessellate/GrTessellationPathRenderer.cpp
@@ -15,10 +15,8 @@
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrSurfaceDrawContext.h"
#include "src/gpu/GrVx.h"
-#include "src/gpu/effects/GrBlendFragmentProcessor.h"
#include "src/gpu/geometry/GrStyledShape.h"
-#include "src/gpu/geometry/GrWangsFormula.h"
-#include "src/gpu/ops/GrFillRectOp.h"
+#include "src/gpu/tessellate/GrAtlasRenderTask.h"
#include "src/gpu/tessellate/GrDrawAtlasPathOp.h"
#include "src/gpu/tessellate/GrPathInnerTriangulateOp.h"
#include "src/gpu/tessellate/GrPathStencilCoverOp.h"
@@ -26,10 +24,8 @@
#include "src/gpu/tessellate/GrStrokeTessellateOp.h"
#include "src/gpu/tessellate/shaders/GrModulateAtlasCoverageFP.h"
-constexpr static SkISize kAtlasInitialSize{512, 512};
-constexpr static int kMaxAtlasSize = 2048;
-
constexpr static auto kAtlasAlpha8Type = GrColorType::kAlpha_8;
+constexpr static int kAtlasInitialSize = 512;
// The atlas is only used for small-area paths, which means at least one dimension of every path is
// guaranteed to be quite small. So if we transpose tall paths, then every path will have a small
@@ -37,7 +33,7 @@
constexpr static auto kAtlasAlgorithm = GrDynamicAtlas::RectanizerAlgorithm::kPow2;
// Ensure every path in the atlas falls in or below the 128px high rectanizer band.
-constexpr static int kMaxAtlasPathHeight = 128;
+constexpr static int kAtlasMaxPathHeight = 128;
bool GrTessellationPathRenderer::IsSupported(const GrCaps& caps) {
return !caps.avoidStencilBuffers() &&
@@ -46,15 +42,18 @@
!caps.disableTessellationPathRenderer();
}
-GrTessellationPathRenderer::GrTessellationPathRenderer(GrRecordingContext* rContext)
- : fAtlas(kAtlasAlpha8Type, GrDynamicAtlas::InternalMultisample::kYes, kAtlasInitialSize,
- std::min(kMaxAtlasSize, rContext->priv().caps()->maxPreferredRenderTargetSize()),
- *rContext->priv().caps(), kAtlasAlgorithm) {
+GrTessellationPathRenderer::GrTessellationPathRenderer(GrRecordingContext* rContext) {
const GrCaps& caps = *rContext->priv().caps();
auto atlasFormat = caps.getDefaultBackendFormat(kAtlasAlpha8Type, GrRenderable::kYes);
if (rContext->asDirectContext() && // The atlas doesn't support DDL yet.
caps.internalMultisampleCount(atlasFormat) > 1) {
- fMaxAtlasPathWidth = fAtlas.maxAtlasSize() / 2; // Enable the atlas.
+#if GR_TEST_UTILS
+ fAtlasMaxSize = rContext->priv().options().fMaxTextureAtlasSize;
+#else
+ fAtlasMaxSize = 2048;
+#endif
+ fAtlasMaxSize = SkPrevPow2(std::min(fAtlasMaxSize, caps.maxPreferredRenderTargetSize()));
+ fAtlasInitialSize = SkNextPow2(std::min(kAtlasInitialSize, fAtlasMaxSize));
}
}
@@ -135,26 +134,33 @@
SkRect pathDevBounds;
args.fViewMatrix->mapRect(&pathDevBounds, args.fShape->bounds());
- // See if the path is small and simple enough to atlas instead of drawing directly.
- //
- // NOTE: The atlas uses alpha8 coverage even for msaa render targets. We could theoretically
- // render the sample mask to an integer texture, but such a scheme would probably require
- // GL_EXT_post_depth_coverage, which appears to have low adoption.
- SkIRect devIBounds;
- SkIPoint16 locationInAtlas;
- bool transposedInAtlas;
- if (args.fUserStencilSettings->isUnused() &&
- this->tryAddPathToAtlas(*args.fContext->priv().caps(), *args.fViewMatrix, path,
- pathDevBounds, args.fAAType != GrAAType::kNone, &devIBounds,
- &locationInAtlas, &transposedInAtlas)) {
- // The atlas is not compatible with DDL. We should only be using it on direct contexts.
- SkASSERT(args.fContext->asDirectContext());
- auto op = GrOp::Make<GrDrawAtlasPathOp>(args.fContext, surfaceDrawContext->numSamples(),
- sk_ref_sp(fAtlas.textureProxy()), devIBounds,
- locationInAtlas, transposedInAtlas,
- *args.fViewMatrix, std::move(args.fPaint));
- surfaceDrawContext->addDrawOp(args.fClip, std::move(op));
- return true;
+ if (args.fUserStencilSettings->isUnused()) {
+ // See if the path is small and simple enough to atlas instead of drawing directly.
+ //
+ // NOTE: The atlas uses alpha8 coverage even for msaa render targets. We could theoretically
+ // render the sample mask to an integer texture, but such a scheme would probably require
+ // GL_EXT_post_depth_coverage, which appears to have low adoption.
+ SkIRect devIBounds;
+ SkIPoint16 locationInAtlas;
+ bool transposedInAtlas;
+ auto visitProxiesUsedByDraw = [&args](GrVisitProxyFunc visitor) {
+ if (args.fPaint.hasColorFragmentProcessor()) {
+ args.fPaint.getColorFragmentProcessor()->visitProxies(visitor);
+ }
+ if (args.fPaint.hasCoverageFragmentProcessor()) {
+ args.fPaint.getCoverageFragmentProcessor()->visitProxies(visitor);
+ }
+ };
+ if (this->tryAddPathToAtlas(args.fContext, *args.fViewMatrix, path, pathDevBounds,
+ args.fAAType != GrAAType::kNone, &devIBounds, &locationInAtlas,
+ &transposedInAtlas, visitProxiesUsedByDraw)) {
+ auto op = GrOp::Make<GrDrawAtlasPathOp>(
+ args.fContext, surfaceDrawContext->numSamples(),
+ sk_ref_sp(fAtlasRenderTasks.back()->atlasProxy()), devIBounds, locationInAtlas,
+ transposedInAtlas, *args.fViewMatrix, std::move(args.fPaint));
+ surfaceDrawContext->addDrawOp(args.fClip, std::move(op));
+ return true;
+ }
}
// Handle convex paths only if we couldn't fit them in the atlas. We give the atlas priority in
@@ -210,21 +216,28 @@
surfaceDrawContext->addDrawOp(args.fClip, std::move(op));
}
-GrFPResult GrTessellationPathRenderer::makeAtlasClipFP(const SkIRect& drawBounds,
- const SkMatrix& viewMatrix,
- const SkPath& path, GrAA aa,
+GrFPResult GrTessellationPathRenderer::makeAtlasClipFP(GrRecordingContext* rContext,
+ const GrOp* opBeingClipped,
std::unique_ptr<GrFragmentProcessor> inputFP,
- const GrCaps& caps) {
+ const SkIRect& drawBounds,
+ const SkMatrix& viewMatrix,
+ const SkPath& path, GrAA aa) {
if (viewMatrix.hasPerspective()) {
return GrFPFailure(std::move(inputFP));
}
SkIRect devIBounds;
SkIPoint16 locationInAtlas;
bool transposedInAtlas;
+ auto visitProxiesUsedByDraw = [&opBeingClipped, &inputFP](GrVisitProxyFunc visitor) {
+ opBeingClipped->visitProxies(visitor);
+ if (inputFP) {
+ inputFP->visitProxies(visitor);
+ }
+ };
// tryAddPathToAtlas() ignores inverseness of the fill. See getAtlasUberPath().
- if (!this->tryAddPathToAtlas(caps, viewMatrix, path, viewMatrix.mapRect(path.getBounds()),
+ if (!this->tryAddPathToAtlas(rContext, viewMatrix, path, viewMatrix.mapRect(path.getBounds()),
aa != GrAA::kNo, &devIBounds, &locationInAtlas,
- &transposedInAtlas)) {
+ &transposedInAtlas, visitProxiesUsedByDraw)) {
// The path is too big, or the atlas ran out of room.
return GrFPFailure(std::move(inputFP));
}
@@ -248,8 +261,9 @@
// ever changes.
SkASSERT(path.isInverseFillType());
}
+ GrSurfaceProxyView atlasView = fAtlasRenderTasks.back()->readView(*rContext->priv().caps());
return GrFPSuccess(std::make_unique<GrModulateAtlasCoverageFP>(flags, std::move(inputFP),
- fAtlas.surfaceProxyView(caps),
+ std::move(atlasView),
atlasMatrix, devIBounds));
}
@@ -270,17 +284,22 @@
fPathGenID = path.getGenerationID();
}
-bool GrTessellationPathRenderer::tryAddPathToAtlas(const GrCaps& caps, const SkMatrix& viewMatrix,
- const SkPath& path, const SkRect& pathDevBounds,
- bool antialias, SkIRect* devIBounds,
- SkIPoint16* locationInAtlas,
- bool* transposedInAtlas) {
+bool GrTessellationPathRenderer::tryAddPathToAtlas(GrRecordingContext* rContext,
+ const SkMatrix& viewMatrix, const SkPath& path,
+ const SkRect& pathDevBounds, bool antialias,
+ SkIRect* devIBounds, SkIPoint16* locationInAtlas,
+ bool* transposedInAtlas,
+ const VisitProxiesFn& visitProxiesUsedByDraw) {
SkASSERT(!viewMatrix.hasPerspective()); // See onCanDrawPath().
- if (!fMaxAtlasPathWidth) {
+ if (!fAtlasMaxSize) {
return false;
}
+ // The atlas is not compatible with DDL. We should only be using it on direct contexts.
+ SkASSERT(rContext->asDirectContext());
+
+ const GrCaps& caps = *rContext->priv().caps();
if (!caps.multisampleDisableSupport() && !antialias) {
return false;
}
@@ -289,18 +308,18 @@
// guarantees that every atlas entry has a small height, which lends very well to efficient pow2
// atlas packing.
pathDevBounds.roundOut(devIBounds);
- int maxDimenstion = devIBounds->width();
+ int maxDimension = devIBounds->width();
int minDimension = devIBounds->height();
- *transposedInAtlas = minDimension > maxDimenstion;
+ *transposedInAtlas = minDimension > maxDimension;
if (*transposedInAtlas) {
- std::swap(minDimension, maxDimenstion);
+ std::swap(minDimension, maxDimension);
}
// Check if the path is too large for an atlas. Since we transpose paths in the atlas so height
- // is always "minDimension", limiting to kMaxAtlasPathHeight^2 pixels guarantees height <=
- // kMaxAtlasPathHeight, while also allowing paths that are very wide and short.
- if ((uint64_t)maxDimenstion * minDimension > kMaxAtlasPathHeight * kMaxAtlasPathHeight ||
- maxDimenstion > fMaxAtlasPathWidth) {
+ // is always "minDimension", limiting to kAtlasMaxPathHeight^2 pixels guarantees height <=
+ // kAtlasMaxPathHeight, while also allowing paths that are very wide and short.
+ if ((uint64_t)maxDimension * minDimension > kAtlasMaxPathHeight * kAtlasMaxPathHeight ||
+ maxDimension > fAtlasMaxSize) {
return false;
}
@@ -314,105 +333,103 @@
}
}
- if (!fAtlas.addRect(maxDimenstion, minDimension, locationInAtlas)) {
- return false;
+ if (fAtlasRenderTasks.empty() ||
+ !fAtlasRenderTasks.back()->addPath(viewMatrix, path, antialias, devIBounds->topLeft(),
+ maxDimension, minDimension, *transposedInAtlas,
+ locationInAtlas)) {
+ // We either don't have an atlas yet or the current one is full. Try to replace it.
+ GrAtlasRenderTask* currentAtlasTask = (!fAtlasRenderTasks.empty())
+ ? fAtlasRenderTasks.back().get() : nullptr;
+ if (currentAtlasTask) {
+ // Don't allow the current atlas to be replaced if the draw already uses it. Otherwise
+ // the draw would use two different atlases, which breaks our guarantee that there will
+ // only ever be one atlas active at a time.
+ const GrSurfaceProxy* currentAtlasProxy = currentAtlasTask->atlasProxy();
+ bool drawUsesCurrentAtlas = false;
+ visitProxiesUsedByDraw([currentAtlasProxy, &drawUsesCurrentAtlas](GrSurfaceProxy* proxy,
+ GrMipmapped) {
+ if (proxy == currentAtlasProxy) {
+ drawUsesCurrentAtlas = true;
+ }
+ });
+ if (drawUsesCurrentAtlas) {
+ // The draw already uses the current atlas. Give up.
+ return false;
+ }
+ }
+ // Replace the atlas with a new one.
+ auto dynamicAtlas = std::make_unique<GrDynamicAtlas>(
+ kAtlasAlpha8Type, GrDynamicAtlas::InternalMultisample::kYes,
+ SkISize{fAtlasInitialSize, fAtlasInitialSize}, fAtlasMaxSize,
+ *rContext->priv().caps(), kAtlasAlgorithm);
+ auto newAtlasTask = sk_make_sp<GrAtlasRenderTask>(rContext, rContext->priv().auditTrail(),
+ sk_make_sp<GrArenas>(),
+ std::move(dynamicAtlas));
+ rContext->priv().drawingManager()->addAtlasTask(newAtlasTask, currentAtlasTask);
+ SkAssertResult(newAtlasTask->addPath(viewMatrix, path, antialias, devIBounds->topLeft(),
+ maxDimension, minDimension, *transposedInAtlas,
+ locationInAtlas));
+ fAtlasRenderTasks.push_back(std::move(newAtlasTask));
+ fAtlasPathCache.reset();
}
// Remember this path's location in the atlas, in case it gets drawn again.
if (!path.isVolatile()) {
fAtlasPathCache.set(atlasPathKey, *locationInAtlas);
}
-
- SkMatrix atlasMatrix = viewMatrix;
- if (*transposedInAtlas) {
- std::swap(atlasMatrix[0], atlasMatrix[3]);
- std::swap(atlasMatrix[1], atlasMatrix[4]);
- float tx=atlasMatrix.getTranslateX(), ty=atlasMatrix.getTranslateY();
- atlasMatrix.setTranslateX(ty - devIBounds->y() + locationInAtlas->x());
- atlasMatrix.setTranslateY(tx - devIBounds->x() + locationInAtlas->y());
- } else {
- atlasMatrix.postTranslate(locationInAtlas->x() - devIBounds->x(),
- locationInAtlas->y() - devIBounds->y());
- }
-
- // Concatenate this path onto our uber path that matches its fill and AA types.
- SkPath* uberPath = this->getAtlasUberPath(path.getFillType(), antialias);
- uberPath->moveTo(locationInAtlas->x(), locationInAtlas->y()); // Implicit moveTo(0,0).
- uberPath->addPath(path, atlasMatrix);
return true;
}
+#ifdef SK_DEBUG
+// Ensures the atlas dependencies are set up such that each atlas will be totally out of service
+// before we render the next one in line. This means there will only ever be one atlas active at a
+// time and that they can all share the same texture.
+void validate_atlas_dependencies(const SkTArray<sk_sp<GrAtlasRenderTask>>& atlasTasks) {
+ for (int i = atlasTasks.count() - 1; i >= 1; --i) {
+ GrAtlasRenderTask* atlasTask = atlasTasks[i].get();
+ GrAtlasRenderTask* previousAtlasTask = atlasTasks[i - 1].get();
+ // Double check that atlasTask depends on every dependent of its previous atlas. If this
+ // fires it might mean previousAtlasTask gained a new dependent after atlasTask came into
+ // service (maybe by an op that hadn't yet been added to an opsTask when we registered the
+ // new atlas with the drawingManager).
+ for (GrRenderTask* previousAtlasUser : previousAtlasTask->dependents()) {
+ SkASSERT(atlasTask->dependsOn(previousAtlasUser));
+ }
+ }
+}
+#endif
+
void GrTessellationPathRenderer::preFlush(GrOnFlushResourceProvider* onFlushRP,
SkSpan<const uint32_t> /* taskIDs */) {
- if (!fAtlas.drawBounds().isEmpty()) {
- this->renderAtlas(onFlushRP);
- fAtlas.reset(kAtlasInitialSize, *onFlushRP->caps());
- }
- for (SkPath& path : fAtlasUberPaths) {
- path.reset();
- }
- fAtlasPathCache.reset();
-}
-
-constexpr static GrUserStencilSettings kTestStencil(
- GrUserStencilSettings::StaticInit<
- 0x0000,
- GrUserStencilTest::kNotEqual,
- 0xffff,
- GrUserStencilOp::kKeep,
- GrUserStencilOp::kKeep,
- 0xffff>());
-
-constexpr static GrUserStencilSettings kTestAndResetStencil(
- GrUserStencilSettings::StaticInit<
- 0x0000,
- GrUserStencilTest::kNotEqual,
- 0xffff,
- GrUserStencilOp::kZero,
- GrUserStencilOp::kKeep,
- 0xffff>());
-
-void GrTessellationPathRenderer::renderAtlas(GrOnFlushResourceProvider* onFlushRP) {
- auto rtc = fAtlas.instantiate(onFlushRP);
- if (!rtc) {
+ if (fAtlasRenderTasks.empty()) {
+ SkASSERT(fAtlasPathCache.count() == 0);
return;
}
- SkRect atlasRect = SkRect::MakeIWH(fAtlas.drawBounds().width(), fAtlas.drawBounds().height());
+ // Verify the atlases can all share the same texture.
+ SkDEBUGCODE(validate_atlas_dependencies(fAtlasRenderTasks);)
- // Add ops to stencil the atlas paths.
- for (auto antialias : {false, true}) {
- for (auto fillType : {SkPathFillType::kWinding, SkPathFillType::kEvenOdd}) {
- SkPath* uberPath = this->getAtlasUberPath(fillType, antialias);
- if (uberPath->isEmpty()) {
- continue;
- }
- uberPath->setFillType(fillType);
- GrAAType aaType = (antialias) ? GrAAType::kMSAA : GrAAType::kNone;
- auto op = GrOp::Make<GrPathStencilCoverOp>(onFlushRP->recordingContext(), SkMatrix::I(),
- *uberPath, GrPaint(), aaType,
- PathFlags::kStencilOnly, atlasRect);
- rtc->addDrawOp(nullptr, std::move(op));
+ // Instantiate the first atlas.
+ fAtlasRenderTasks[0]->instantiate(onFlushRP);
+
+ // Instantiate the remaining atlases.
+ GrTexture* firstAtlasTexture = fAtlasRenderTasks[0]->atlasProxy()->peekTexture();
+ SkASSERT(firstAtlasTexture);
+ for (int i = 1; i < fAtlasRenderTasks.count(); ++i) {
+ GrAtlasRenderTask* atlasTask = fAtlasRenderTasks[i].get();
+ if (atlasTask->atlasProxy()->backingStoreDimensions() == firstAtlasTexture->dimensions()) {
+ atlasTask->instantiate(onFlushRP, sk_ref_sp(firstAtlasTexture));
+ } else {
+ // The atlases are expected to all be full size except possibly the final one.
+ SkASSERT(i == fAtlasRenderTasks.count() - 1);
+ SkASSERT(atlasTask->atlasProxy()->backingStoreDimensions().area() <
+ firstAtlasTexture->dimensions().area());
+ // TODO: Recycle the larger atlas texture anyway?
+ atlasTask->instantiate(onFlushRP);
}
}
- // Finally, draw a fullscreen rect to convert our stencilled paths into alpha coverage masks.
- GrPaint paint;
- paint.setColor4f(SK_PMColor4fWHITE);
- const GrUserStencilSettings* stencil;
- if (onFlushRP->caps()->discardStencilValuesAfterRenderPass()) {
- // This is the final op in the surfaceDrawContext. Since Ganesh is planning to discard the
- // stencil values anyway, there is no need to reset the stencil values back to 0.
- stencil = &kTestStencil;
- } else {
- // Outset the cover rect in case there are T-junctions in the path bounds.
- atlasRect.outset(1, 1);
- stencil = &kTestAndResetStencil;
- }
- rtc->stencilRect(nullptr, stencil, std::move(paint), GrAA::kYes, SkMatrix::I(), atlasRect);
-
- if (rtc->asSurfaceProxy()->requiresManualMSAAResolve()) {
- onFlushRP->addTextureResolveTask(sk_ref_sp(rtc->asTextureProxy()),
- GrSurfaceProxy::ResolveFlags::kMSAA);
- }
+ // Reset all atlas data.
+ fAtlasRenderTasks.reset();
+ fAtlasPathCache.reset();
}
diff --git a/src/gpu/tessellate/GrTessellationPathRenderer.h b/src/gpu/tessellate/GrTessellationPathRenderer.h
index 4629085..6c8b5aa 100644
--- a/src/gpu/tessellate/GrTessellationPathRenderer.h
+++ b/src/gpu/tessellate/GrTessellationPathRenderer.h
@@ -16,7 +16,8 @@
#include "src/gpu/GrDynamicAtlas.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrPathRenderer.h"
-#include <map>
+
+class GrAtlasRenderTask;
// This is the tie-in point for path rendering via GrPathTessellateOp. This path renderer draws
// paths using a hybrid Red Book "stencil, then cover" method. Curves get linearized by GPU
@@ -45,36 +46,39 @@
// Returns a fragment processor that modulates inputFP by the given deviceSpacePath's coverage,
// implemented using an internal atlas.
//
- // Returns 'inputFP' wrapped in GrFPFailure() if the path was too big, or if the atlas was out
- // of room. (Currently, "too big" means more than 128*128 total pixels, or larger than half the
- // atlas size in either dimension.)
+ // Returns 'inputFP' wrapped in GrFPFailure() if the path was too large, or if the current atlas
+ // is full and already used by either opBeingClipped or inputFP. (Currently, "too large" means
+ // more than 128*128 total pixels, or larger than the atlas size in either dimension.)
//
// Also returns GrFPFailure() if the view matrix has perspective.
- GrFPResult makeAtlasClipFP(const SkIRect& drawBounds, const SkMatrix&, const SkPath&, GrAA,
- std::unique_ptr<GrFragmentProcessor> inputFP, const GrCaps&);
+ GrFPResult makeAtlasClipFP(GrRecordingContext*, const GrOp* opBeingClipped,
+ std::unique_ptr<GrFragmentProcessor> inputFP,
+ const SkIRect& drawBounds, const SkMatrix&, const SkPath&, GrAA);
void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> taskIDs) override;
private:
- SkPath* getAtlasUberPath(SkPathFillType fillType, bool antialias) {
- int idx = (int)antialias << 1;
- idx |= (int)fillType & 1;
- return &fAtlasUberPaths[idx];
- }
- // Adds the filled path to fAtlas if the path is small enough, and if the atlas isn't full.
- // Currently, "small enough" means 128*128 total pixels or less, and no larger than half the
- // atlas size in either dimension.
- bool tryAddPathToAtlas(const GrCaps&, const SkMatrix&, const SkPath&,
+ using VisitProxiesFn = std::function<void(const GrVisitProxyFunc&)>;
+
+ // Adds the filled path to an atlas.
+ //
+ // Fails and returns false if the path is too large, or if the current atlas is full and already
+ // in use according to 'visitProxiesUsedByDraw'. (Currently, "too large" means more than 128*128
+ // total pixels, or larger than the atlas size in either dimension.)
+ bool tryAddPathToAtlas(GrRecordingContext*, const SkMatrix&, const SkPath&,
const SkRect& pathDevBounds, bool antialias, SkIRect* devIBounds,
- SkIPoint16* locationInAtlas, bool* transposedInAtlas);
- void renderAtlas(GrOnFlushResourceProvider*);
+ SkIPoint16* locationInAtlas, bool* transposedInAtlas,
+ const VisitProxiesFn& visitProxiesUsedByDraw);
- GrDynamicAtlas fAtlas;
- int fMaxAtlasPathWidth = 0;
- SkPath fAtlasUberPaths[4]; // 2 fillTypes * 2 antialias modes.
+ int fAtlasMaxSize = 0;
+ int fAtlasInitialSize = 0;
- // This simple cache remembers the locations of cacheable path masks in the atlas. Its main
- // motivation is for clip paths.
+ // A collection of all atlases we've created and used since the last flush. We instantiate these
+ // at flush time during preFlush().
+ SkSTArray<4, sk_sp<GrAtlasRenderTask>> fAtlasRenderTasks;
+
+ // This simple cache remembers the locations of cacheable path masks in the most recent atlas.
+ // Its main motivation is for clip paths.
struct AtlasPathKey {
void set(const SkMatrix&, bool antialias, const SkPath&);
bool operator==(const AtlasPathKey& k) const {
diff --git a/tools/flags/CommonFlagsGpu.cpp b/tools/flags/CommonFlagsGpu.cpp
index 9dceaee..8caf342 100644
--- a/tools/flags/CommonFlagsGpu.cpp
+++ b/tools/flags/CommonFlagsGpu.cpp
@@ -33,7 +33,11 @@
"[~]none [~]dashline [~]aahairline [~]aaconvex [~]aalinearizing [~]small [~]tri "
"[~]tess [~]all");
-static DEFINE_int(internalSamples, 4, "Number of samples for internal draws that use MSAA.");
+static DEFINE_int(internalSamples, -1,
+ "Number of samples for internal draws that use MSAA, or default value if negative.");
+
+static DEFINE_int(maxAtlasSize, -1,
+ "Maximum width and height of internal texture atlases, or default value if negative.");
static DEFINE_bool(disableDriverCorrectnessWorkarounds, false,
"Disables all GPU driver correctness workarounds");
@@ -103,10 +107,16 @@
ctxOptions->fMaxTessellationSegmentsOverride = FLAGS_maxTessellationSegments;
ctxOptions->fAlwaysPreferHardwareTessellation = FLAGS_alwaysHwTess;
ctxOptions->fGpuPathRenderers = collect_gpu_path_renderers_from_flags();
- ctxOptions->fInternalMultisampleCount = FLAGS_internalSamples;
ctxOptions->fDisableDriverCorrectnessWorkarounds = FLAGS_disableDriverCorrectnessWorkarounds;
ctxOptions->fResourceCacheLimitOverride = FLAGS_gpuResourceCacheLimit;
+ if (FLAGS_internalSamples >= 0) {
+ ctxOptions->fInternalMultisampleCount = FLAGS_internalSamples;
+ }
+ if (FLAGS_maxAtlasSize >= 0) {
+ ctxOptions->fMaxTextureAtlasSize = FLAGS_maxAtlasSize;
+ }
+
if (FLAGS_dontReduceOpsTaskSplitting) {
ctxOptions->fReduceOpsTaskSplitting = GrContextOptions::Enable::kNo;
} else {