Delete path caching and path rendering from ccpr
All that's left is a clip atlas renderer.
Bug: chromium:1158093
Change-Id: I8b509904a752a202ff1321e5302c41a3f57a5edb
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/383741
Reviewed-by: Brian Osman <brianosman@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
diff --git a/src/gpu/ccpr/GrCCAtlas.cpp b/src/gpu/ccpr/GrCCAtlas.cpp
index ebc31d5..a08f86c 100644
--- a/src/gpu/ccpr/GrCCAtlas.cpp
+++ b/src/gpu/ccpr/GrCCAtlas.cpp
@@ -10,7 +10,6 @@
#include "include/private/SkTPin.h"
#include "src/core/SkIPoint16.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
-#include "src/gpu/ccpr/GrCCPathCache.h"
static SkISize choose_initial_atlas_size(const GrCCAtlas::Specs& specs) {
// Begin with the first pow2 dimensions whose area is theoretically large enough to contain the
@@ -30,11 +29,10 @@
specs.fMaxPreferredTextureSize : caps.maxRenderTargetSize();
}
-GrCCAtlas::GrCCAtlas(CoverageType coverageType, const Specs& specs, const GrCaps& caps)
- : GrDynamicAtlas(CoverageTypeToColorType(coverageType),
- CoverageTypeHasInternalMultisample(coverageType),
- choose_initial_atlas_size(specs), choose_max_atlas_size(specs, caps), caps)
- , fCoverageType(coverageType) {
+GrCCAtlas::GrCCAtlas(const Specs& specs, const GrCaps& caps)
+ : GrDynamicAtlas(GrColorType::kAlpha_8, InternalMultisample::kYes,
+ choose_initial_atlas_size(specs), choose_max_atlas_size(specs, caps),
+ caps) {
SkASSERT(specs.fMaxPreferredTextureSize > 0);
}
@@ -53,31 +51,6 @@
fEndStencilResolveInstance = idx;
}
-static uint32_t next_atlas_unique_id() {
- static std::atomic<uint32_t> nextID;
- return nextID.fetch_add(1, std::memory_order_relaxed);
-}
-
-sk_sp<GrCCCachedAtlas> GrCCAtlas::refOrMakeCachedAtlas(GrOnFlushResourceProvider* onFlushRP) {
- if (!fCachedAtlas) {
- static const GrUniqueKey::Domain kAtlasDomain = GrUniqueKey::GenerateDomain();
-
- GrUniqueKey atlasUniqueKey;
- GrUniqueKey::Builder builder(&atlasUniqueKey, kAtlasDomain, 1, "CCPR Atlas");
- builder[0] = next_atlas_unique_id();
- builder.finish();
-
- onFlushRP->assignUniqueKeyToProxy(atlasUniqueKey, this->textureProxy());
-
- fCachedAtlas = sk_make_sp<GrCCCachedAtlas>(fCoverageType, atlasUniqueKey,
- sk_ref_sp(this->textureProxy()));
- }
-
- SkASSERT(fCachedAtlas->coverageType() == fCoverageType);
- SkASSERT(fCachedAtlas->getOnFlushProxy() == this->textureProxy());
- return fCachedAtlas;
-}
-
GrCCAtlas* GrCCAtlasStack::addRect(const SkIRect& devIBounds, SkIVector* devToAtlasOffset) {
GrCCAtlas* retiredAtlas = nullptr;
SkIPoint16 location;
@@ -85,7 +58,7 @@
!fAtlases.back().addRect(devIBounds.width(), devIBounds.height(), &location)) {
// The retired atlas is out of room and can't grow any bigger.
retiredAtlas = !fAtlases.empty() ? &fAtlases.back() : nullptr;
- fAtlases.emplace_back(fCoverageType, fSpecs, *fCaps);
+ fAtlases.emplace_back(fSpecs, *fCaps);
SkASSERT(devIBounds.width() <= fSpecs.fMinWidth);
SkASSERT(devIBounds.height() <= fSpecs.fMinHeight);
SkAssertResult(fAtlases.back().addRect(devIBounds.width(), devIBounds.height(), &location));
diff --git a/src/gpu/ccpr/GrCCAtlas.h b/src/gpu/ccpr/GrCCAtlas.h
index 64255f4..5740b95 100644
--- a/src/gpu/ccpr/GrCCAtlas.h
+++ b/src/gpu/ccpr/GrCCAtlas.h
@@ -10,7 +10,6 @@
#include "src/gpu/GrDynamicAtlas.h"
#include "src/gpu/GrTBlockList.h"
-#include "src/gpu/ccpr/GrCCPathProcessor.h"
class GrCCCachedAtlas;
@@ -32,43 +31,17 @@
void accountForSpace(int width, int height);
};
- enum class CoverageType {
- kA8_Multisample,
- kA8_LiteralCoverage
- };
-
- static constexpr GrColorType CoverageTypeToColorType(CoverageType coverageType) {
- switch (coverageType) {
- case CoverageType::kA8_Multisample:
- case CoverageType::kA8_LiteralCoverage:
- return GrColorType::kAlpha_8;
- }
- SkUNREACHABLE;
- }
-
- static constexpr InternalMultisample CoverageTypeHasInternalMultisample(
- CoverageType coverageType) {
- switch (coverageType) {
- case CoverageType::kA8_LiteralCoverage:
- return InternalMultisample::kNo;
- case CoverageType::kA8_Multisample:
- return InternalMultisample::kYes;
- }
- SkUNREACHABLE;
- }
-
static sk_sp<GrTextureProxy> MakeLazyAtlasProxy(LazyInstantiateAtlasCallback&& callback,
- CoverageType coverageType,
const GrCaps& caps,
GrSurfaceProxy::UseAllocator useAllocator) {
return GrDynamicAtlas::MakeLazyAtlasProxy(std::move(callback),
- CoverageTypeToColorType(coverageType),
- CoverageTypeHasInternalMultisample(coverageType),
+ GrColorType::kAlpha_8,
+ InternalMultisample::kYes,
caps,
useAllocator);
}
- GrCCAtlas(CoverageType, const Specs&, const GrCaps&);
+ GrCCAtlas(const Specs&, const GrCaps&);
~GrCCAtlas() override;
// This is an optional space for the caller to jot down user-defined instance data to use when
@@ -78,13 +51,9 @@
void setEndStencilResolveInstance(int idx);
int getEndStencilResolveInstance() const { return fEndStencilResolveInstance; }
- sk_sp<GrCCCachedAtlas> refOrMakeCachedAtlas(GrOnFlushResourceProvider*);
-
private:
- const CoverageType fCoverageType;
int fFillBatchID;
int fEndStencilResolveInstance;
- sk_sp<GrCCCachedAtlas> fCachedAtlas;
};
/**
@@ -93,13 +62,11 @@
*/
class GrCCAtlasStack {
public:
- using CoverageType = GrCCAtlas::CoverageType;
using CCAtlasAllocator = GrTBlockList<GrCCAtlas, 4>;
- GrCCAtlasStack(CoverageType coverageType, const GrCCAtlas::Specs& specs, const GrCaps* caps)
- : fCoverageType(coverageType), fSpecs(specs), fCaps(caps) {}
+ GrCCAtlasStack(const GrCCAtlas::Specs& specs, const GrCaps* caps)
+ : fSpecs(specs), fCaps(caps) {}
- CoverageType coverageType() const { return fCoverageType; }
bool empty() const { return fAtlases.empty(); }
const GrCCAtlas& front() const { SkASSERT(!this->empty()); return fAtlases.front(); }
GrCCAtlas& front() { SkASSERT(!this->empty()); return fAtlases.front(); }
@@ -118,7 +85,6 @@
GrCCAtlas* addRect(const SkIRect& devIBounds, SkIVector* devToAtlasOffset);
private:
- const CoverageType fCoverageType;
const GrCCAtlas::Specs fSpecs;
const GrCaps* const fCaps;
CCAtlasAllocator fAtlases;
diff --git a/src/gpu/ccpr/GrCCClipPath.cpp b/src/gpu/ccpr/GrCCClipPath.cpp
index 9533d29..eeeae99 100644
--- a/src/gpu/ccpr/GrCCClipPath.cpp
+++ b/src/gpu/ccpr/GrCCClipPath.cpp
@@ -13,9 +13,8 @@
#include "src/gpu/GrTexture.h"
#include "src/gpu/ccpr/GrCCPerFlushResources.h"
-void GrCCClipPath::init(
- const SkPath& deviceSpacePath, const SkIRect& accessRect,
- GrCCAtlas::CoverageType atlasCoverageType, const GrCaps& caps) {
+void GrCCClipPath::init(const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ const GrCaps& caps) {
SkASSERT(!this->isInitialized());
fAtlasLazyProxy = GrCCAtlas::MakeLazyAtlasProxy(
@@ -41,8 +40,7 @@
return GrSurfaceProxy::LazyCallbackResult(
std::move(texture), true,
GrSurfaceProxy::LazyInstantiationKeyMode::kUnsynced);
- },
- atlasCoverageType, caps, GrSurfaceProxy::UseAllocator::kYes);
+ }, caps, GrSurfaceProxy::UseAllocator::kYes);
fDeviceSpacePath = deviceSpacePath;
fDeviceSpacePath.getBounds().roundOut(&fPathDevIBounds);
diff --git a/src/gpu/ccpr/GrCCClipPath.h b/src/gpu/ccpr/GrCCClipPath.h
index c0ec06c..7128fc7 100644
--- a/src/gpu/ccpr/GrCCClipPath.h
+++ b/src/gpu/ccpr/GrCCClipPath.h
@@ -39,7 +39,6 @@
bool isInitialized() const { return fAtlasLazyProxy != nullptr; }
void init(const SkPath& deviceSpacePath,
const SkIRect& desc,
- GrCCAtlas::CoverageType atlasCoverageType,
const GrCaps&);
void addAccess(const SkIRect& accessRect) {
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
deleted file mode 100644
index d123191..0000000
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ /dev/null
@@ -1,462 +0,0 @@
-/*
- * Copyright 2018 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "src/gpu/ccpr/GrCCDrawPathsOp.h"
-
-#include "include/gpu/GrRecordingContext.h"
-#include "src/gpu/GrMemoryPool.h"
-#include "src/gpu/GrOpFlushState.h"
-#include "src/gpu/GrRecordingContextPriv.h"
-#include "src/gpu/ccpr/GrCCPathCache.h"
-#include "src/gpu/ccpr/GrCCPerFlushResources.h"
-#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
-#include "src/gpu/ccpr/GrOctoBounds.h"
-
-static bool uses_varying_coords(const GrPaint& paint) {
- for (const auto& fp : GrFragmentProcessor::PaintRange(paint)) {
- if (fp.usesVaryingCoordsDirectly()) {
- return true;
- }
- }
- return false;
-}
-
-GrOp::Owner GrCCDrawPathsOp::Make(
- GrRecordingContext* context, const SkIRect& clipIBounds, const SkMatrix& m,
- const GrStyledShape& shape, GrPaint&& paint) {
- SkRect conservativeDevBounds;
- m.mapRect(&conservativeDevBounds, shape.bounds());
-
- const SkStrokeRec& stroke = shape.style().strokeRec();
- float strokeDevWidth = 0;
- float conservativeInflationRadius = 0;
- if (!stroke.isFillStyle()) {
- strokeDevWidth = GrCoverageCountingPathRenderer::GetStrokeDevWidth(
- m, stroke, &conservativeInflationRadius);
- conservativeDevBounds.outset(conservativeInflationRadius, conservativeInflationRadius);
- }
-
- std::unique_ptr<GrCCDrawPathsOp> op;
- float conservativeSize = std::max(conservativeDevBounds.height(), conservativeDevBounds.width());
- if (conservativeSize > GrCoverageCountingPathRenderer::kPathCropThreshold) {
- // The path is too large. Crop it, or analytic AA can run out of fp32 precision.
- SkPath croppedDevPath;
- shape.asPath(&croppedDevPath);
- croppedDevPath.transform(m, &croppedDevPath);
-
- SkIRect cropBox = clipIBounds;
- GrStyledShape croppedDevShape;
- if (stroke.isFillStyle()) {
- GrCoverageCountingPathRenderer::CropPath(croppedDevPath, cropBox, &croppedDevPath);
- croppedDevShape = GrStyledShape(croppedDevPath);
- conservativeDevBounds = croppedDevShape.bounds();
- } else {
- int r = SkScalarCeilToInt(conservativeInflationRadius);
- cropBox.outset(r, r);
- GrCoverageCountingPathRenderer::CropPath(croppedDevPath, cropBox, &croppedDevPath);
- SkStrokeRec devStroke = stroke;
- devStroke.setStrokeStyle(strokeDevWidth);
- croppedDevShape = GrStyledShape(croppedDevPath, GrStyle(devStroke, nullptr));
- conservativeDevBounds = croppedDevPath.getBounds();
- conservativeDevBounds.outset(conservativeInflationRadius, conservativeInflationRadius);
- }
-
- // FIXME: This breaks local coords: http://skbug.com/8003
- return InternalMake(context, clipIBounds, SkMatrix::I(), croppedDevShape, strokeDevWidth,
- conservativeDevBounds, std::move(paint));
- }
-
- return InternalMake(context, clipIBounds, m, shape, strokeDevWidth, conservativeDevBounds,
- std::move(paint));
-}
-
-GrOp::Owner GrCCDrawPathsOp::InternalMake(
- GrRecordingContext* context, const SkIRect& clipIBounds, const SkMatrix& m,
- const GrStyledShape& shape, float strokeDevWidth, const SkRect& conservativeDevBounds,
- GrPaint&& paint) {
- // The path itself should have been cropped if larger than kPathCropThreshold. If it had a
- // stroke, that would have further inflated its draw bounds.
- SkASSERT(std::max(conservativeDevBounds.height(), conservativeDevBounds.width()) <
- GrCoverageCountingPathRenderer::kPathCropThreshold +
- GrCoverageCountingPathRenderer::kMaxBoundsInflationFromStroke*2 + 1);
-
- SkIRect shapeConservativeIBounds;
- conservativeDevBounds.roundOut(&shapeConservativeIBounds);
-
- SkIRect maskDevIBounds;
- if (!maskDevIBounds.intersect(clipIBounds, shapeConservativeIBounds)) {
- return nullptr;
- }
-
- return GrOp::Make<GrCCDrawPathsOp>(context, m, shape, strokeDevWidth, shapeConservativeIBounds,
- maskDevIBounds, conservativeDevBounds, std::move(paint));
-}
-
-GrCCDrawPathsOp::GrCCDrawPathsOp(const SkMatrix& m, const GrStyledShape& shape,
- float strokeDevWidth, const SkIRect& shapeConservativeIBounds,
- const SkIRect& maskDevIBounds, const SkRect& conservativeDevBounds,
- GrPaint&& paint)
- : GrDrawOp(ClassID())
- , fViewMatrixIfUsingLocalCoords(uses_varying_coords(paint) ? m : SkMatrix::I())
- , fDraws(m, shape, strokeDevWidth, shapeConservativeIBounds, maskDevIBounds,
- paint.getColor4f())
- , fProcessors(std::move(paint)) { // Paint must be moved after fetching its color above.
- SkDEBUGCODE(fBaseInstance = -1);
- // If the path is clipped, CCPR will only draw the visible portion. This helps improve batching,
- // since it eliminates the need for scissor when drawing to the main canvas.
- // FIXME: We should parse the path right here. It will provide a tighter bounding box for us to
- // give the opsTask, as well as enabling threaded parsing when using DDL.
- SkRect clippedDrawBounds;
- if (!clippedDrawBounds.intersect(conservativeDevBounds, SkRect::Make(maskDevIBounds))) {
- clippedDrawBounds.setEmpty();
- }
- // We always have AA bloat, even in MSAA atlas mode. This is because by the time this Op comes
- // along and draws to the main canvas, the atlas has been resolved to analytic coverage.
- this->setBounds(clippedDrawBounds, GrOp::HasAABloat::kYes, GrOp::IsHairline::kNo);
-}
-
-GrCCDrawPathsOp::~GrCCDrawPathsOp() {
- if (fOwningPerOpsTaskPaths) {
- // Remove the list's dangling pointer to this Op before deleting it.
- fOwningPerOpsTaskPaths->fDrawOps.remove(this);
- }
-}
-
-GrCCDrawPathsOp::SingleDraw::SingleDraw(const SkMatrix& m, const GrStyledShape& shape,
- float strokeDevWidth,
- const SkIRect& shapeConservativeIBounds,
- const SkIRect& maskDevIBounds, const SkPMColor4f& color)
- : fMatrix(m)
- , fShape(shape)
- , fStrokeDevWidth(strokeDevWidth)
- , fShapeConservativeIBounds(shapeConservativeIBounds)
- , fMaskDevIBounds(maskDevIBounds)
- , fColor(color) {
-#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
- if (fShape.hasUnstyledKey()) {
- // On AOSP we round view matrix translates to integer values for cacheable paths. We do this
- // to match HWUI's cache hit ratio, which doesn't consider the matrix when caching paths.
- fMatrix.setTranslateX(SkScalarRoundToScalar(fMatrix.getTranslateX()));
- fMatrix.setTranslateY(SkScalarRoundToScalar(fMatrix.getTranslateY()));
- }
-#endif
-}
-
-GrProcessorSet::Analysis GrCCDrawPathsOp::finalize(
- const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
- GrClampType clampType) {
- SkASSERT(1 == fNumDraws); // There should only be a single path draw in this Op right now.
- return fDraws.head().finalize(caps, clip, hasMixedSampledCoverage, clampType, &fProcessors);
-}
-
-GrProcessorSet::Analysis GrCCDrawPathsOp::SingleDraw::finalize(
- const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage, GrClampType
- clampType, GrProcessorSet* processors) {
- const GrProcessorSet::Analysis& analysis = processors->finalize(
- fColor, GrProcessorAnalysisCoverage::kSingleChannel, clip,
- &GrUserStencilSettings::kUnused, hasMixedSampledCoverage, caps, clampType, &fColor);
-
- // Lines start looking jagged when they get thinner than 1px. For thin strokes it looks better
- // if we can convert them to hairline (i.e., inflate the stroke width to 1px), and instead
- // reduce the opacity to create the illusion of thin-ness. This strategy also helps reduce
- // artifacts from coverage dilation when there are self intersections.
- if (analysis.isCompatibleWithCoverageAsAlpha() &&
- !fShape.style().strokeRec().isFillStyle() && fStrokeDevWidth < 1) {
- // Modifying the shape affects its cache key. The draw can't have a cache entry yet or else
- // our next step would invalidate it.
- SkASSERT(!fCacheEntry);
- SkASSERT(SkStrokeRec::kStroke_Style == fShape.style().strokeRec().getStyle());
-
- SkPath path;
- fShape.asPath(&path);
-
- // Create a hairline version of our stroke.
- SkStrokeRec hairlineStroke = fShape.style().strokeRec();
- hairlineStroke.setStrokeStyle(0);
-
- // How transparent does a 1px stroke have to be in order to appear as thin as the real one?
- float coverage = fStrokeDevWidth;
-
- fShape = GrStyledShape(path, GrStyle(hairlineStroke, nullptr));
- fStrokeDevWidth = 1;
-
- // fShapeConservativeIBounds already accounted for this possibility of inflating the stroke.
- fColor = fColor * coverage;
- }
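- // Illustrative arithmetic for the hairline substitution above (values assumed,
- // not from the original source): a stroke with fStrokeDevWidth = 0.25 becomes a
- // 1px-wide hairline drawn at 25% alpha, i.e. fColor = fColor * 0.25, which reads
- // as roughly the same total coverage as the original quarter-pixel stroke.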
-
- return analysis;
-}
-
-GrOp::CombineResult GrCCDrawPathsOp::onCombineIfPossible(GrOp* op, SkArenaAlloc*, const GrCaps&) {
- GrCCDrawPathsOp* that = op->cast<GrCCDrawPathsOp>();
- SkASSERT(fOwningPerOpsTaskPaths);
- SkASSERT(fNumDraws);
- SkASSERT(!that->fOwningPerOpsTaskPaths ||
- that->fOwningPerOpsTaskPaths == fOwningPerOpsTaskPaths);
- SkASSERT(that->fNumDraws);
-
- if (fProcessors != that->fProcessors ||
- fViewMatrixIfUsingLocalCoords != that->fViewMatrixIfUsingLocalCoords) {
- return CombineResult::kCannotCombine;
- }
-
- fDraws.append(std::move(that->fDraws), &fOwningPerOpsTaskPaths->fAllocator);
-
- SkDEBUGCODE(fNumDraws += that->fNumDraws);
- SkDEBUGCODE(that->fNumDraws = 0);
- return CombineResult::kMerged;
-}
-
-void GrCCDrawPathsOp::addToOwningPerOpsTaskPaths(sk_sp<GrCCPerOpsTaskPaths> owningPerOpsTaskPaths) {
- SkASSERT(1 == fNumDraws);
- SkASSERT(!fOwningPerOpsTaskPaths);
- fOwningPerOpsTaskPaths = std::move(owningPerOpsTaskPaths);
- fOwningPerOpsTaskPaths->fDrawOps.addToTail(this);
-}
-
-void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
- GrOnFlushResourceProvider* onFlushRP,
- GrCCPerFlushResourceSpecs* specs) {
- for (SingleDraw& draw : fDraws) {
- draw.accountForOwnPath(pathCache, onFlushRP, specs);
- }
-}
-
-void GrCCDrawPathsOp::SingleDraw::accountForOwnPath(
- GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
- GrCCPerFlushResourceSpecs* specs) {
- using CoverageType = GrCCAtlas::CoverageType;
-
- SkPath path;
- fShape.asPath(&path);
-
- SkASSERT(!fCacheEntry);
- SkASSERT(!fCachedAtlasProxy);
-
- if (pathCache) {
- fCacheEntry = pathCache->find(
- onFlushRP, fShape, fMaskDevIBounds, fMatrix, &fCachedMaskShift);
- }
-
- if (fCacheEntry) {
- if (const GrCCCachedAtlas* cachedAtlas = fCacheEntry->cachedAtlas()) {
- fCachedAtlasProxy = sk_ref_sp(cachedAtlas->getOnFlushProxy());
- SkASSERT(fCachedAtlasProxy);
- fCachedAtlasCoverageType = cachedAtlas->coverageType();
- if (CoverageType::kA8_LiteralCoverage == fCachedAtlasCoverageType) {
- ++specs->fNumCachedPaths;
- } else {
- // Suggest that this path be copied to a literal coverage atlas, to save memory.
- // (The client may decline this copy via DoCopiesToA8Coverage::kNo.)
- ++specs->fNumCopiedPaths;
- specs->fCopyPathStats.statPath(path);
- specs->fCopyAtlasSpecs.accountForSpace(fCacheEntry->width(), fCacheEntry->height());
- }
- return;
- }
-
- if (this->shouldCachePathMask(onFlushRP->caps()->maxRenderTargetSize())) {
- fDoCachePathMask = true;
- // We don't cache partial masks; ensure the bounds include the entire path.
- fMaskDevIBounds = fShapeConservativeIBounds;
- }
- }
-
- // Plan on rendering this path in a new atlas.
- ++specs->fNumRenderedPaths;
- specs->fRenderedPathStats.statPath(path);
- specs->fRenderedAtlasSpecs.accountForSpace(fMaskDevIBounds.width(), fMaskDevIBounds.height());
-}
-
-bool GrCCDrawPathsOp::SingleDraw::shouldCachePathMask(int maxRenderTargetSize) const {
- SkASSERT(fCacheEntry);
- SkASSERT(!fCachedAtlasProxy);
- if (fCacheEntry->hitCount() <= 1) {
- return false; // Don't cache a path mask until at least its second hit.
- }
-
- int shapeMaxDimension = std::max(
- fShapeConservativeIBounds.height(), fShapeConservativeIBounds.width());
- if (shapeMaxDimension > maxRenderTargetSize) {
- return false; // This path isn't cacheable.
- }
-
- int64_t shapeArea = sk_64_mul(
- fShapeConservativeIBounds.height(), fShapeConservativeIBounds.width());
- if (shapeArea < 100*100) {
- // If a path is small enough, we might as well try to render and cache the entire thing, no
- // matter how much of it is actually visible.
- return true;
- }
-
- // The hitRect should already be contained within the shape's bounds, but we still intersect it
- // because it's possible for edges very near pixel boundaries (e.g., 0.999999) to round out
- // inconsistently, depending on the integer translation values and fp32 precision.
- SkIRect hitRect = fCacheEntry->hitRect().makeOffset(fCachedMaskShift);
- hitRect.intersect(fShapeConservativeIBounds);
-
- // Render and cache the entire path mask if we see enough of it to justify rendering all the
- // pixels. Our criteria for "enough" is that we must have seen at least 50% of the path in the
- // past, and in this particular draw we must see at least 10% of it.
- int64_t hitArea = sk_64_mul(hitRect.height(), hitRect.width());
- int64_t drawArea = sk_64_mul(fMaskDevIBounds.height(), fMaskDevIBounds.width());
- return hitArea*2 >= shapeArea && drawArea*10 >= shapeArea;
-}
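-// Worked example of the heuristic above, with illustrative values: for a 200x200
-// path (shapeArea = 40000), a cumulative hitRect of 150x150 gives hitArea = 22500,
-// so hitArea*2 = 45000 >= 40000 satisfies the "seen at least 50%" test; a current
-// draw of 80x60 gives drawArea = 4800, and drawArea*10 = 48000 >= 40000 satisfies
-// the "seeing at least 10%" test, so the full mask is rendered and cached.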
-
-void GrCCDrawPathsOp::setupResources(
- GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
- GrCCPerFlushResources* resources, DoCopiesToA8Coverage doCopies) {
- SkASSERT(fNumDraws > 0);
- SkASSERT(-1 == fBaseInstance);
- fBaseInstance = resources->nextPathInstanceIdx();
-
- for (SingleDraw& draw : fDraws) {
- draw.setupResources(pathCache, onFlushRP, resources, doCopies, this);
- }
-
- if (!fInstanceRanges.empty()) {
- fInstanceRanges.back().fEndInstanceIdx = resources->nextPathInstanceIdx();
- }
-}
-
-void GrCCDrawPathsOp::SingleDraw::setupResources(
- GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
- GrCCPerFlushResources* resources, DoCopiesToA8Coverage doCopies, GrCCDrawPathsOp* op) {
- using CoverageType = GrCCAtlas::CoverageType;
-
- SkPath path;
- fShape.asPath(&path);
-
- auto fillRule = (fShape.style().strokeRec().isFillStyle())
- ? GrFillRuleForSkPath(path)
- : GrFillRule::kNonzero;
-
- if (fCacheEntry) {
- // fCacheEntry's cachedAtlas may have been null at the time of accountForOwnPaths(), but then
- // created later during the current flush cycle, so update fCachedAtlasProxy if needed.
- if (!fCachedAtlasProxy) {
- if (auto cachedAtlas = fCacheEntry->cachedAtlas()) {
- fCachedAtlasProxy = sk_ref_sp(cachedAtlas->getOnFlushProxy());
- fCachedAtlasCoverageType = cachedAtlas->coverageType();
-
- // A path mask didn't exist for this path at the beginning of the flush, but we have
- // one now, meaning we've drawn the same path multiple times this flush. Notify the
- // resources that a rendered path was reused, for their internal debug counters.
- SkDEBUGCODE(resources->debugOnly_didReuseRenderedPath());
- }
- }
-
- // Does the path already exist in a cached atlas texture?
- if (fCachedAtlasProxy) {
- if (DoCopiesToA8Coverage::kYes == doCopies &&
- fCachedAtlasCoverageType != CoverageType::kA8_LiteralCoverage) {
- if (fCacheEntry->cachedAtlas()) {
- // This will be a no-op if we already upgraded the path earlier
- // (e.g., if the same path was drawn multiple times during this flush).
- resources->upgradeEntryToLiteralCoverageAtlas(
- pathCache, onFlushRP, fCacheEntry.get(), fillRule);
- fCachedAtlasProxy = sk_ref_sp(fCacheEntry->cachedAtlas()->getOnFlushProxy());
- SkASSERT(fCachedAtlasProxy);
- SkASSERT(CoverageType::kA8_LiteralCoverage
- == fCacheEntry->cachedAtlas()->coverageType());
- fCachedAtlasCoverageType = CoverageType::kA8_LiteralCoverage;
- } else {
- // Since fCachedAtlasProxy is not null, a null cachedAtlas means the cache entry
- // was evicted after setting up GrCCPerFlushResourceSpecs.
- // Keep the resource tracking correct here to prevent an assertion failure.
- SkDEBUGCODE(resources->cancelEvictedDoCopies());
- }
- }
-#if 0
- // Simple color manipulation to visualize cached paths.
- fColor = (CoverageType::kA8_LiteralCoverage == fCachedAtlasCoverageType) ?
- SkPMColor4f{0,0,.25,.25} : SkPMColor4f{0,.25,0,.25};
-#endif
- op->recordInstance(fCachedAtlasProxy.get(), resources->nextPathInstanceIdx());
- resources->appendDrawPathInstance().set(*fCacheEntry, fCachedMaskShift, fColor,
- fillRule);
-
- return;
- }
- }
-
- // Render the raw path into a coverage count atlas. renderShapeInAtlas() gives us two tight
- // bounding boxes: one in device space, and a second rotated an additional 45 degrees. The
- // path vertex shader uses these two bounding boxes to generate an octagon that circumscribes
- // the path.
- GrOctoBounds octoBounds;
- SkIRect devIBounds;
- SkIVector devToAtlasOffset;
- if (auto atlas = resources->renderShapeInAtlas(
- fMaskDevIBounds, fMatrix, fShape, fStrokeDevWidth, &octoBounds, &devIBounds,
- &devToAtlasOffset)) {
- op->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
- resources->appendDrawPathInstance().set(octoBounds, devToAtlasOffset, fColor, fillRule);
-
- if (fDoCachePathMask) {
- SkASSERT(fCacheEntry);
- SkASSERT(!fCachedAtlasProxy);
- SkASSERT(fShapeConservativeIBounds == fMaskDevIBounds);
- fCacheEntry->setCoverageCountAtlas(
- onFlushRP, atlas, devToAtlasOffset, octoBounds, devIBounds, fCachedMaskShift);
- }
- }
-}
-
-inline void GrCCDrawPathsOp::recordInstance(GrTextureProxy* atlasProxy, int instanceIdx) {
- if (fInstanceRanges.empty()) {
- fInstanceRanges.push_back({atlasProxy, instanceIdx});
- } else if (fInstanceRanges.back().fAtlasProxy != atlasProxy) {
- fInstanceRanges.back().fEndInstanceIdx = instanceIdx;
- fInstanceRanges.push_back({atlasProxy, instanceIdx});
- }
- SkASSERT(fInstanceRanges.back().fAtlasProxy == atlasProxy);
-}
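-// Illustrative trace of recordInstance() (proxies A and B are assumed, not from
-// the original source): calls (A,0), (A,1), (B,5) leave fInstanceRanges =
-// [{A, fEndInstanceIdx=5}, {B, 5}], where the final range's fEndInstanceIdx is a
-// placeholder that setupResources() patches to resources->nextPathInstanceIdx()
-// once all draws are recorded.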
-
-void GrCCDrawPathsOp::onPrepare(GrOpFlushState* flushState) {
- // The CCPR ops don't know their atlas textures until after the preFlush calls have been
- // executed at the start of GrDrawingManager::flush. Thus the proxies are not added during the
- // normal visitProxies calls made from addDrawOp. Therefore, the atlas proxies are added now.
- for (const InstanceRange& range : fInstanceRanges) {
- flushState->sampledProxyArray()->push_back(range.fAtlasProxy);
- }
-}
-
-void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
- SkASSERT(fOwningPerOpsTaskPaths);
-
- const GrCCPerFlushResources* resources = fOwningPerOpsTaskPaths->fFlushResources.get();
- if (!resources) {
- return; // Setup failed.
- }
-
- GrPipeline::InitArgs initArgs;
- initArgs.fCaps = &flushState->caps();
- initArgs.fDstProxyView = flushState->drawOpArgs().dstProxyView();
- initArgs.fWriteSwizzle = flushState->drawOpArgs().writeView().swizzle();
- auto clip = flushState->detachAppliedClip();
- GrPipeline pipeline(initArgs, std::move(fProcessors), std::move(clip));
-
- int baseInstance = fBaseInstance;
- SkASSERT(baseInstance >= 0); // Make sure setupResources() has been called.
-
- for (const InstanceRange& range : fInstanceRanges) {
- SkASSERT(range.fEndInstanceIdx > baseInstance);
-
- GrSurfaceProxy* atlas = range.fAtlasProxy;
- if (atlas->isInstantiated()) { // Instantiation can fail in exceptional circumstances.
- GrSwizzle swizzle = flushState->caps().getReadSwizzle(atlas->backendFormat(),
- GrColorType::kAlpha_8);
- GrCCPathProcessor pathProc(atlas->peekTexture(), swizzle, GrCCAtlas::kTextureOrigin,
- fViewMatrixIfUsingLocalCoords);
- pathProc.drawPaths(flushState, pipeline, *atlas, *resources, baseInstance,
- range.fEndInstanceIdx, this->bounds());
- }
-
- baseInstance = range.fEndInstanceIdx;
- }
-}
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
deleted file mode 100644
index 52a3a2b..0000000
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright 2018 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrCCDrawPathsOp_DEFINED
-#define GrCCDrawPathsOp_DEFINED
-
-#include "src/core/SkTInternalLList.h"
-#include "src/gpu/ccpr/GrCCPathCache.h"
-#include "src/gpu/ccpr/GrCCSTLList.h"
-#include "src/gpu/geometry/GrStyledShape.h"
-#include "src/gpu/ops/GrDrawOp.h"
-
-class GrCCAtlas;
-class GrCCPerFlushResources;
-struct GrCCPerFlushResourceSpecs;
-struct GrCCPerOpsTaskPaths;
-class GrOnFlushResourceProvider;
-class GrRecordingContext;
-
-/**
- * This is the Op that draws paths to the actual canvas, using atlases generated by CCPR.
- */
-class GrCCDrawPathsOp : public GrDrawOp {
-public:
- DEFINE_OP_CLASS_ID
- SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCDrawPathsOp);
-
- static GrOp::Owner Make(GrRecordingContext*, const SkIRect& clipIBounds,
- const SkMatrix&, const GrStyledShape&, GrPaint&&);
- ~GrCCDrawPathsOp() override;
-
- const char* name() const override { return "GrCCDrawPathsOp"; }
- FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
- GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
- bool hasMixedSampledCoverage, GrClampType) override;
- CombineResult onCombineIfPossible(GrOp*, SkArenaAlloc*, const GrCaps&) override;
- void visitProxies(const VisitProxyFunc& fn) const override {
- for (const auto& range : fInstanceRanges) {
- fn(range.fAtlasProxy, GrMipmapped::kNo);
- }
- fProcessors.visitProxies(fn);
- }
- void onPrepare(GrOpFlushState*) override;
-
- void addToOwningPerOpsTaskPaths(sk_sp<GrCCPerOpsTaskPaths> owningPerOpsTaskPaths);
-
- // Makes decisions about how to draw each path (cached, copied, rendered, etc.), and
- // increments/fills out the corresponding GrCCPerFlushResourceSpecs.
- void accountForOwnPaths(GrCCPathCache*, GrOnFlushResourceProvider*, GrCCPerFlushResourceSpecs*);
-
- // Allows the caller to decide whether to actually do the suggested copies from cached 16-bit
- // coverage count atlases, and into 8-bit literal coverage atlases. Purely to save space.
- enum class DoCopiesToA8Coverage : bool {
- kNo = false,
- kYes = true
- };
-
- // Allocates the GPU resources indicated by accountForOwnPaths(), in preparation for drawing. If
- // DoCopiesToA8Coverage is kNo, the paths slated for copy will instead be left in their 16-bit
- // coverage count atlases.
- //
- // NOTE: If using DoCopiesToA8Coverage::kNo, it is the caller's responsibility to have called
- // cancelCopies() on the GrCCPerFlushResourceSpecs, prior to making this call.
- void setupResources(GrCCPathCache*, GrOnFlushResourceProvider*, GrCCPerFlushResources*,
- DoCopiesToA8Coverage);
-
- void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
-
-private:
- void onPrePrepare(GrRecordingContext*,
- const GrSurfaceProxyView& writeView,
- GrAppliedClip*,
- const GrXferProcessor::DstProxyView&,
- GrXferBarrierFlags renderPassXferBarriers,
- GrLoadOp colorLoadOp) override {}
-
- friend class GrOp;
-
- static GrOp::Owner InternalMake(GrRecordingContext*,
- const SkIRect& clipIBounds,
- const SkMatrix&, const GrStyledShape&,
- float strokeDevWidth,
- const SkRect& conservativeDevBounds,
- GrPaint&&);
-
- GrCCDrawPathsOp(const SkMatrix&, const GrStyledShape&, float strokeDevWidth,
- const SkIRect& shapeConservativeIBounds, const SkIRect& maskDevIBounds,
- const SkRect& conservativeDevBounds, GrPaint&&);
-
- void recordInstance(GrTextureProxy* atlasProxy, int instanceIdx);
-
- const SkMatrix fViewMatrixIfUsingLocalCoords;
-
- class SingleDraw {
- public:
- SingleDraw(const SkMatrix&, const GrStyledShape&, float strokeDevWidth,
- const SkIRect& shapeConservativeIBounds, const SkIRect& maskDevIBounds,
- const SkPMColor4f&);
-
- // See the corresponding methods in GrCCDrawPathsOp.
- GrProcessorSet::Analysis finalize(
- const GrCaps&, const GrAppliedClip*, bool hasMixedSampledCoverage, GrClampType,
- GrProcessorSet*);
- void accountForOwnPath(GrCCPathCache*, GrOnFlushResourceProvider*,
- GrCCPerFlushResourceSpecs*);
- void setupResources(GrCCPathCache*, GrOnFlushResourceProvider*, GrCCPerFlushResources*,
- DoCopiesToA8Coverage, GrCCDrawPathsOp*);
-
- private:
- bool shouldCachePathMask(int maxRenderTargetSize) const;
-
- SkMatrix fMatrix;
- GrStyledShape fShape;
- float fStrokeDevWidth;
- const SkIRect fShapeConservativeIBounds;
- SkIRect fMaskDevIBounds;
- SkPMColor4f fColor;
-
- GrCCPathCache::OnFlushEntryRef fCacheEntry;
- sk_sp<GrTextureProxy> fCachedAtlasProxy;
- GrCCAtlas::CoverageType fCachedAtlasCoverageType;
- SkIVector fCachedMaskShift;
- bool fDoCachePathMask = false;
-
- SingleDraw* fNext = nullptr;
-
- friend class GrCCSTLList<SingleDraw>; // To access fNext.
- };
-
- // Declare fOwningPerOpsTaskPaths first, before fDraws. The draws use memory allocated by
- // fOwningPerOpsTaskPaths, so it must not be unreffed until after fDraws is destroyed.
- sk_sp<GrCCPerOpsTaskPaths> fOwningPerOpsTaskPaths;
-
- GrCCSTLList<SingleDraw> fDraws;
- SkDEBUGCODE(int fNumDraws = 1);
-
- GrProcessorSet fProcessors;
-
- struct InstanceRange {
- GrTextureProxy* fAtlasProxy;
- int fEndInstanceIdx;
- };
-
- SkSTArray<2, InstanceRange, true> fInstanceRanges;
- int fBaseInstance SkDEBUGCODE(= -1);
-};
-
-#endif
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
deleted file mode 100644
index 4f16d80..0000000
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * Copyright 2018 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "src/gpu/ccpr/GrCCPathCache.h"
-
-#include "include/private/SkNx.h"
-#include "src/gpu/GrOnFlushResourceProvider.h"
-#include "src/gpu/GrProxyProvider.h"
-
-static constexpr int kMaxKeyDataCountU32 = 256; // 1kB of uint32_t's.
-
-DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>, uint32_t, true);
-
-static inline uint32_t next_path_cache_id() {
- static std::atomic<uint32_t> gNextID(1);
- for (;;) {
- uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
- if (SK_InvalidUniqueID != id) {
- return id;
- }
- }
-}
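-// Illustrative behavior of the generator above: IDs come back 1, 2, 3, ...; if
-// the 32-bit counter ever wraps around to SK_InvalidUniqueID, the loop simply
-// fetches again, so callers never observe the reserved invalid value.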
-
-static inline bool SkShouldPostMessageToBus(
- const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
- return key->pathCacheUniqueID() == msgBusUniqueID;
-}
-
-// The maximum number of cache entries we allow in our own cache.
-static constexpr int kMaxCacheCount = 1 << 16;
-
-
-GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
- : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
- SkASSERT(!m.hasPerspective());
- Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
- Sk2f transFloor;
-#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
- // On Android framework we pre-round view matrix translates to integers for better caching.
- transFloor = translate;
-#else
- transFloor = translate.floor();
- (translate - transFloor).store(fSubpixelTranslate);
-#endif
- shift->set((int)transFloor[0], (int)transFloor[1]);
- SkASSERT((float)shift->fX == transFloor[0]); // Make sure transFloor had integer values.
- SkASSERT((float)shift->fY == transFloor[1]);
-}
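-// Illustrative example for the non-AOSP branch above: a view matrix with
-// translate (10.75, -3.25) floors to (10, -4), so shift = (10, -4) and
-// fSubpixelTranslate = (0.75, 0.75), since floor(-3.25) = -4.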
-
-inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
- const GrCCPathCache::MaskTransform& b) {
- if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
- return false;
- }
-#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
- if (((Sk2f::Load(a.fSubpixelTranslate) -
- Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
- return false;
- }
-#endif
- return true;
-}
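-// Illustrative tolerance check: subpixel translates (0.500, 0.250) and
-// (0.501, 0.250) differ by 0.001 < 1/256 (~0.0039), so they still fuzzy-match;
-// a difference of 0.01 in either component would miss the cache.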
-
-sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
- int dataCountU32, const void* data) {
- void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
- sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
- if (data) {
- memcpy(key->data(), data, key->dataSizeInBytes());
- }
- return key;
-}
-
-void GrCCPathCache::Key::operator delete(void* p) { ::operator delete(p); }
-
-const uint32_t* GrCCPathCache::Key::data() const {
- // The shape key is a variable-length footer to the entry allocation.
- return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
-}
-
-uint32_t* GrCCPathCache::Key::data() {
- // The shape key is a variable-length footer to the entry allocation.
- return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
-}
-
-void GrCCPathCache::Key::changed() {
- // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
- SkMessageBus<sk_sp<Key>, uint32_t>::Post(sk_ref_sp(this));
-}
-
-GrCCPathCache::GrCCPathCache(uint32_t contextUniqueID)
- : fContextUniqueID(contextUniqueID)
- , fInvalidatedKeysInbox(next_path_cache_id())
- , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
-}
-
-GrCCPathCache::~GrCCPathCache() {
- while (!fLRU.isEmpty()) {
- this->evict(*fLRU.tail()->fCacheKey, fLRU.tail());
- }
- SkASSERT(0 == fHashTable.count()); // Ensure the hash table and LRU list were coherent.
-
- // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
- // We just purge via message bus since we don't have any access to the resource cache right now.
- for (const sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
- SkMessageBus<GrUniqueKeyInvalidatedMessage, uint32_t>::Post(
- GrUniqueKeyInvalidatedMessage(proxy->getUniqueKey(), fContextUniqueID));
- }
- for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
- SkMessageBus<GrUniqueKeyInvalidatedMessage, uint32_t>::Post(
- GrUniqueKeyInvalidatedMessage(key, fContextUniqueID));
- }
-}
-
-namespace {
-
-// Produces a key that accounts both for a shape's path geometry, as well as any stroke/style.
-class WriteKeyHelper {
-public:
- static constexpr int kStrokeWidthIdx = 0;
- static constexpr int kStrokeMiterIdx = 1;
- static constexpr int kStrokeCapJoinIdx = 2;
- static constexpr int kShapeUnstyledKeyIdx = 3;
-
- WriteKeyHelper(const GrStyledShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}
-
- // Returns the total number of uint32_t's to allocate for the key.
- int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }
-
- // Writes the key data to out[].
- void write(const GrStyledShape& shape, uint32_t* out) {
- // Stroke key.
- // We don't use GrStyle::WriteKey() because it does not account for hairlines.
- // http://skbug.com/8273
- SkASSERT(!shape.style().hasPathEffect());
- const SkStrokeRec& stroke = shape.style().strokeRec();
- if (stroke.isFillStyle()) {
- // Use a value for width that won't collide with a valid fp32 value >= 0.
- out[kStrokeWidthIdx] = ~0;
- out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
- } else {
- float width = stroke.getWidth(), miterLimit = stroke.getMiter();
- memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
- memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
- out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
- static_assert(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
- }
-
- // Shape unstyled key.
- shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
- }
-
-private:
- int fShapeUnstyledKeyCount;
-};
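-// Illustrative key layout (stroke values assumed, not from the original source):
-// a stroke with width=2, miter=4, round cap, and bevel join writes
-// out[0]=0x40000000 (the bits of 2.0f), out[1]=0x40800000 (the bits of 4.0f),
-// out[2]=(kRound_Cap<<16)|kBevel_Join, followed by the shape's unstyled key words.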
-
-} // namespace
-
-GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(
- GrOnFlushResourceProvider* onFlushRP, const GrStyledShape& shape,
- const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, SkIVector* maskShift) {
- if (!shape.hasUnstyledKey()) {
- return OnFlushEntryRef();
- }
-
- WriteKeyHelper writeKeyHelper(shape);
- if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
- return OnFlushEntryRef();
- }
-
- SkASSERT(fScratchKey->unique());
- fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
- writeKeyHelper.write(shape, fScratchKey->data());
-
- MaskTransform m(viewMatrix, maskShift);
- GrCCPathCacheEntry* entry = nullptr;
- if (HashNode* node = fHashTable.find(*fScratchKey)) {
- entry = node->entry();
- SkASSERT(fLRU.isInList(entry));
-
- if (!fuzzy_equals(m, entry->fMaskTransform)) {
- // The path was reused with an incompatible matrix.
- if (entry->unique()) {
- // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
- SkASSERT(0 == entry->fOnFlushRefCnt); // Because we are unique.
- entry->fMaskTransform = m;
- entry->fHitCount = 0;
- entry->fHitRect = SkIRect::MakeEmpty();
- entry->releaseCachedAtlas(this);
- } else {
- this->evict(*fScratchKey);
- entry = nullptr;
- }
- }
- }
-
- if (!entry) {
- if (fHashTable.count() >= kMaxCacheCount) {
- SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
- SkASSERT(node && node->entry() == fLRU.tail());
- this->evict(*fLRU.tail()->fCacheKey); // We've exceeded our limit.
- }
-
- // Create a new entry in the cache.
- sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
- writeKeyHelper.allocCountU32(), fScratchKey->data());
- SkASSERT(*permanentKey == *fScratchKey);
- SkASSERT(!fHashTable.find(*permanentKey));
- entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();
-
- SkASSERT(fHashTable.count() <= kMaxCacheCount);
- } else {
- fLRU.remove(entry); // Will be re-added at head.
- }
-
- SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
- SkASSERT(node && node->entry() == entry);
- fLRU.addToHead(entry);
-
- if (0 == entry->fOnFlushRefCnt) {
- // Only update the timestamp and hit count if we haven't seen this entry yet during the
- // current flush.
- entry->fTimestamp = this->quickPerFlushTimestamp();
- ++entry->fHitCount;
-
- if (entry->fCachedAtlas) {
- SkASSERT(SkToBool(entry->fCachedAtlas->peekOnFlushRefCnt()) ==
- SkToBool(entry->fCachedAtlas->getOnFlushProxy()));
- if (!entry->fCachedAtlas->getOnFlushProxy()) {
- if (sk_sp<GrTextureProxy> onFlushProxy = onFlushRP->findOrCreateProxyByUniqueKey(
- entry->fCachedAtlas->textureKey(), GrSurfaceProxy::UseAllocator::kNo)) {
- entry->fCachedAtlas->setOnFlushProxy(std::move(onFlushProxy));
- }
- }
- if (!entry->fCachedAtlas->getOnFlushProxy()) {
- // Our atlas's backing texture got purged from the GrResourceCache. Release the
- // cached atlas.
- entry->releaseCachedAtlas(this);
- }
- }
- }
- entry->fHitRect.join(clippedDrawBounds.makeOffset(-*maskShift));
- SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
- return OnFlushEntryRef::OnFlushRef(entry);
-}
-
-void GrCCPathCache::evict(const GrCCPathCache::Key& key, GrCCPathCacheEntry* entry) {
- if (!entry) {
- HashNode* node = fHashTable.find(key);
- SkASSERT(node);
- entry = node->entry();
- }
- SkASSERT(*entry->fCacheKey == key);
- SkASSERT(!entry->hasBeenEvicted());
- entry->fCacheKey->markShouldDeregister(); // Unregister the path listener.
- entry->releaseCachedAtlas(this);
- fLRU.remove(entry);
- fHashTable.remove(key);
-}
-
-void GrCCPathCache::doPreFlushProcessing() {
- this->evictInvalidatedCacheKeys();
-
- // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
- fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
-}
-
-void GrCCPathCache::purgeEntriesOlderThan(GrProxyProvider* proxyProvider,
- const GrStdSteadyClock::time_point& purgeTime) {
- this->evictInvalidatedCacheKeys();
-
-#ifdef SK_DEBUG
- auto lastTimestamp = (fLRU.isEmpty())
- ? GrStdSteadyClock::time_point::max()
- : fLRU.tail()->fTimestamp;
-#endif
-
- // Evict every entry from our local path cache whose timestamp is older than purgeTime.
- while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
-#ifdef SK_DEBUG
- // Verify that fLRU is sorted by timestamp.
- auto timestamp = fLRU.tail()->fTimestamp;
- SkASSERT(timestamp >= lastTimestamp);
- lastTimestamp = timestamp;
-#endif
- this->evict(*fLRU.tail()->fCacheKey);
- }
-
- // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
- this->purgeInvalidatedAtlasTextures(proxyProvider);
-}
-
-void GrCCPathCache::purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider* onFlushRP) {
- for (const sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
- onFlushRP->removeUniqueKeyFromProxy(proxy.get());
- }
- fInvalidatedProxies.reset();
-
- for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
- onFlushRP->processInvalidUniqueKey(key);
- }
- fInvalidatedProxyUniqueKeys.reset();
-}
-
-void GrCCPathCache::purgeInvalidatedAtlasTextures(GrProxyProvider* proxyProvider) {
- for (const sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
- proxyProvider->removeUniqueKeyFromProxy(proxy.get());
- }
- fInvalidatedProxies.reset();
-
- for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
- proxyProvider->processInvalidUniqueKey(key, nullptr,
- GrProxyProvider::InvalidateGPUResource::kYes);
- }
- fInvalidatedProxyUniqueKeys.reset();
-}
-
-void GrCCPathCache::evictInvalidatedCacheKeys() {
- SkTArray<sk_sp<Key>> invalidatedKeys;
- fInvalidatedKeysInbox.poll(&invalidatedKeys);
- for (const sk_sp<Key>& key : invalidatedKeys) {
- bool isInCache = !key->shouldDeregister(); // Gets set upon exiting the cache.
- if (isInCache) {
- this->evict(*key);
- }
- }
-}
-
-GrCCPathCache::OnFlushEntryRef
-GrCCPathCache::OnFlushEntryRef::OnFlushRef(GrCCPathCacheEntry* entry) {
- entry->ref();
- ++entry->fOnFlushRefCnt;
- if (entry->fCachedAtlas) {
- entry->fCachedAtlas->incrOnFlushRefCnt();
- }
- return OnFlushEntryRef(entry);
-}
-
-GrCCPathCache::OnFlushEntryRef::~OnFlushEntryRef() {
- if (!fEntry) {
- return;
- }
- --fEntry->fOnFlushRefCnt;
- SkASSERT(fEntry->fOnFlushRefCnt >= 0);
- if (fEntry->fCachedAtlas) {
- fEntry->fCachedAtlas->decrOnFlushRefCnt();
- }
- fEntry->unref();
-}
-
-
-void GrCCPathCacheEntry::setCoverageCountAtlas(
- GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas, const SkIVector& atlasOffset,
- const GrOctoBounds& octoBounds, const SkIRect& devIBounds, const SkIVector& maskShift) {
- SkASSERT(fOnFlushRefCnt > 0);
- SkASSERT(!fCachedAtlas); // Otherwise we would need to call releaseCachedAtlas().
-
- if (this->hasBeenEvicted()) {
- // This entry will never be found in the path cache again. Don't bother trying to save an
- // atlas texture for it in the GrResourceCache.
- return;
- }
-
- fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
- fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
- fCachedAtlas->addPathPixels(devIBounds.height() * devIBounds.width());
-
- fAtlasOffset = atlasOffset + maskShift;
-
- fOctoBounds.setOffset(octoBounds, -maskShift.fX, -maskShift.fY);
- fDevIBounds = devIBounds.makeOffset(-maskShift);
-}
-
-GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::upgradeToLiteralCoverageAtlas(
- GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas,
- const SkIVector& newAtlasOffset) {
- SkASSERT(!this->hasBeenEvicted());
- SkASSERT(fOnFlushRefCnt > 0);
- SkASSERT(fCachedAtlas);
- SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage != fCachedAtlas->coverageType());
-
- ReleaseAtlasResult releaseAtlasResult = this->releaseCachedAtlas(pathCache);
-
- fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
- fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
- fCachedAtlas->addPathPixels(this->height() * this->width());
-
- fAtlasOffset = newAtlasOffset;
- return releaseAtlasResult;
-}
-
-GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::releaseCachedAtlas(
- GrCCPathCache* pathCache) {
- ReleaseAtlasResult result = ReleaseAtlasResult::kNone;
- if (fCachedAtlas) {
- result = fCachedAtlas->invalidatePathPixels(pathCache, this->height() * this->width());
- if (fOnFlushRefCnt) {
- SkASSERT(fOnFlushRefCnt > 0);
- fCachedAtlas->decrOnFlushRefCnt(fOnFlushRefCnt);
- }
- fCachedAtlas = nullptr;
- }
- return result;
-}
-
-GrCCPathCacheEntry::ReleaseAtlasResult GrCCCachedAtlas::invalidatePathPixels(
- GrCCPathCache* pathCache, int numPixels) {
- // Mark the pixels invalid in the cached atlas texture.
- fNumInvalidatedPathPixels += numPixels;
- SkASSERT(fNumInvalidatedPathPixels <= fNumPathPixels);
- if (!fIsInvalidatedFromResourceCache && fNumInvalidatedPathPixels >= fNumPathPixels / 2) {
- // Too many invalidated pixels: purge the atlas texture from the resource cache.
- if (fOnFlushProxy) {
- // Don't clear (or std::move) fOnFlushProxy. Other path cache entries might still have a
- // reference on this atlas and expect to use our proxy during the current flush.
- // fOnFlushProxy will be cleared once fOnFlushRefCnt decrements to zero.
- pathCache->fInvalidatedProxies.push_back(fOnFlushProxy);
- } else {
- pathCache->fInvalidatedProxyUniqueKeys.push_back(fTextureKey);
- }
- fIsInvalidatedFromResourceCache = true;
- return ReleaseAtlasResult::kDidInvalidateFromCache;
- }
- return ReleaseAtlasResult::kNone;
-}
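-// Illustrative numbers for the 50% rule above: an atlas created with 100000
-// path pixels gets purged from the resource cache once invalidatePathPixels()
-// has accumulated at least 50000 invalidated pixels.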
-
-void GrCCCachedAtlas::decrOnFlushRefCnt(int count) const {
- SkASSERT(count > 0);
- fOnFlushRefCnt -= count;
- SkASSERT(fOnFlushRefCnt >= 0);
- if (0 == fOnFlushRefCnt) {
- // Don't hold the actual proxy past the end of the current flush.
- SkASSERT(fOnFlushProxy);
- fOnFlushProxy = nullptr;
- }
-}
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
deleted file mode 100644
index 6a521ae..0000000
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ /dev/null
@@ -1,372 +0,0 @@
-/*
- * Copyright 2018 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrCCPathCache_DEFINED
-#define GrCCPathCache_DEFINED
-
-#include "include/private/SkIDChangeListener.h"
-#include "include/private/SkTHash.h"
-#include "src/core/SkTInternalLList.h"
-#include "src/gpu/GrNonAtomicRef.h"
-#include "src/gpu/ccpr/GrCCAtlas.h"
-#include "src/gpu/ccpr/GrCCPathProcessor.h"
-#include "src/gpu/geometry/GrStyledShape.h"
-
-class GrCCPathCacheEntry;
-class GrStyledShape;
-
-/**
- * This class implements an LRU cache that maps from GrStyledShape to GrCCPathCacheEntry objects.
- * Shapes are only given one entry in the cache, so any time they are accessed with a different
- * matrix, the old entry gets evicted.
- */
-class GrCCPathCache {
-public:
- GrCCPathCache(uint32_t contextUniqueID);
- ~GrCCPathCache();
-
- class Key : public SkIDChangeListener {
- public:
- static sk_sp<Key> Make(uint32_t pathCacheUniqueID, int dataCountU32,
- const void* data = nullptr);
-
- uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; }
-
- int dataSizeInBytes() const { return fDataSizeInBytes; }
- const uint32_t* data() const;
-
- void resetDataCountU32(int dataCountU32) {
- SkASSERT(dataCountU32 <= fDataReserveCountU32);
- fDataSizeInBytes = dataCountU32 * sizeof(uint32_t);
- }
- uint32_t* data();
-
- bool operator==(const Key& that) const {
- return fDataSizeInBytes == that.fDataSizeInBytes &&
- !memcmp(this->data(), that.data(), fDataSizeInBytes);
- }
-
- // Called when our corresponding path is modified or deleted. Not threadsafe.
- void changed() override;
-
- // TODO(b/30449950): use sized delete once P0722R3 is available
- static void operator delete(void* p);
-
- private:
- Key(uint32_t pathCacheUniqueID, int dataCountU32)
- : fPathCacheUniqueID(pathCacheUniqueID)
- , fDataSizeInBytes(dataCountU32 * sizeof(uint32_t))
- SkDEBUGCODE(, fDataReserveCountU32(dataCountU32)) {
- SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
- }
-
- const uint32_t fPathCacheUniqueID;
- int fDataSizeInBytes;
- SkDEBUGCODE(const int fDataReserveCountU32);
- // The GrStyledShape's unstyled key is stored as a variable-length footer to this class.
- // GetKey provides access to it.
- };
-
- // Stores the components of a transformation that affect a path mask (i.e. everything but
- // integer translation). During construction, any integer portions of the matrix's translate are
- // shaved off and returned to the caller. The caller is responsible for those integer shifts.
- struct MaskTransform {
- MaskTransform(const SkMatrix& m, SkIVector* shift);
- float fMatrix2x2[4];
-#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
- // Except on AOSP, cache hits must have matching subpixel portions of their view matrix.
- // On AOSP we follow HWUI's lead and ignore the subpixel translate.
- float fSubpixelTranslate[2];
-#endif
- };
-
- // Represents a ref on a GrCCPathCacheEntry that should only be used during the current flush.
- class OnFlushEntryRef : SkNoncopyable {
- public:
- static OnFlushEntryRef OnFlushRef(GrCCPathCacheEntry*);
- OnFlushEntryRef() = default;
- OnFlushEntryRef(OnFlushEntryRef&& ref) : fEntry(std::exchange(ref.fEntry, nullptr)) {}
- ~OnFlushEntryRef();
-
- GrCCPathCacheEntry* get() const { return fEntry; }
- GrCCPathCacheEntry* operator->() const { return fEntry; }
- GrCCPathCacheEntry& operator*() const { return *fEntry; }
- explicit operator bool() const { return fEntry; }
- void operator=(OnFlushEntryRef&& ref) { fEntry = std::exchange(ref.fEntry, nullptr); }
-
- private:
- OnFlushEntryRef(GrCCPathCacheEntry* entry) : fEntry(entry) {}
- GrCCPathCacheEntry* fEntry = nullptr;
- };
-
- // Finds an entry in the cache that matches the given shape and transformation matrix.
- // 'maskShift' is filled with an integer post-translate that the caller must apply when drawing
- // the entry's mask to the device.
- //
- // NOTE: Shapes are only given one entry, so any time they are accessed with a new
- // transformation, the old entry gets evicted.
- OnFlushEntryRef find(GrOnFlushResourceProvider*, const GrStyledShape&,
- const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix,
- SkIVector* maskShift);
-
- void doPreFlushProcessing();
-
- void purgeEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point& purgeTime);
-
- // As we evict entries from our local path cache, we accumulate a list of invalidated atlas
- // textures. This call purges the invalidated atlas textures from the mainline GrResourceCache.
- // This call is available with two different "provider" objects, to accommodate whatever might
- // be available at the callsite.
- void purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider*);
- void purgeInvalidatedAtlasTextures(GrProxyProvider*);
-
-private:
- // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
- // methods for SkTHash, and can only be moved. This guarantees the hash table holds exactly one
- // reference for each entry. Also, when a HashNode goes out of scope, that means it is exiting
- // the hash table. We take that opportunity to remove it from the LRU list and do some cleanup.
- class HashNode : SkNoncopyable {
- public:
- static const Key& GetKey(const HashNode&);
- inline static uint32_t Hash(const Key& key) {
- return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
- }
-
- HashNode() = default;
- HashNode(GrCCPathCache*, sk_sp<Key>, const MaskTransform&, const GrStyledShape&);
- HashNode(HashNode&& node)
- : fPathCache(node.fPathCache), fEntry(std::move(node.fEntry)) {
- SkASSERT(!node.fEntry);
- }
-
- ~HashNode();
-
- void operator=(HashNode&& node);
-
- GrCCPathCacheEntry* entry() const { return fEntry.get(); }
-
- private:
- GrCCPathCache* fPathCache = nullptr;
- sk_sp<GrCCPathCacheEntry> fEntry;
- };
-
- GrStdSteadyClock::time_point quickPerFlushTimestamp() {
- // time_point::min() means it's time to update fPerFlushTimestamp with a newer clock read.
- if (GrStdSteadyClock::time_point::min() == fPerFlushTimestamp) {
- fPerFlushTimestamp = GrStdSteadyClock::now();
- }
- return fPerFlushTimestamp;
- }
-
- void evict(const GrCCPathCache::Key&, GrCCPathCacheEntry* = nullptr);
-
- // Evicts all the cache entries whose keys have been queued up in fInvalidatedKeysInbox via
- // SkPath listeners.
- void evictInvalidatedCacheKeys();
-
- const uint32_t fContextUniqueID;
-
- SkTHashTable<HashNode, const Key&> fHashTable;
- SkTInternalLList<GrCCPathCacheEntry> fLRU;
- SkMessageBus<sk_sp<Key>, uint32_t>::Inbox fInvalidatedKeysInbox;
- sk_sp<Key> fScratchKey; // Reused for creating a temporary key in the find() method.
-
- // We only read the clock once per flush, and cache it in this variable. This prevents
- // excessive clock reads for cache timestamps, which might degrade performance.
- GrStdSteadyClock::time_point fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
-
- // As we evict entries from our local path cache, we accumulate lists of invalidated atlas
- // textures in these two members. We hold these until we purge them from the GrResourceCache
- // (e.g. via purgeInvalidatedAtlasTextures().)
- SkSTArray<4, sk_sp<GrTextureProxy>> fInvalidatedProxies;
- SkSTArray<4, GrUniqueKey> fInvalidatedProxyUniqueKeys;
-
- friend class GrCCCachedAtlas; // To append to fInvalidatedProxies, fInvalidatedProxyUniqueKeys.
-
-public:
- const SkTHashTable<HashNode, const Key&>& testingOnly_getHashTable() const;
- const SkTInternalLList<GrCCPathCacheEntry>& testingOnly_getLRU() const;
-};
-
-/**
- * This class stores all the data necessary to draw a specific path + matrix combination from their
- * corresponding cached atlas.
- */
-class GrCCPathCacheEntry : public GrNonAtomicRef<GrCCPathCacheEntry> {
-public:
- SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCPathCacheEntry);
-
- ~GrCCPathCacheEntry() {
- SkASSERT(this->hasBeenEvicted()); // Should have called GrCCPathCache::evict().
- SkASSERT(!fCachedAtlas);
- SkASSERT(0 == fOnFlushRefCnt);
- }
-
- const GrCCPathCache::Key& cacheKey() const { SkASSERT(fCacheKey); return *fCacheKey; }
-
- // The number of flushes during which this specific entry (path + matrix combination) has been
- // pulled from the path cache. If a path is pulled from the cache more than once in a single
- // flush, the hit count is only incremented once.
- //
- // If the entry did not previously exist, its hit count will be 1.
- int hitCount() const { return fHitCount; }
-
- // The cumulative region of the path that has been drawn during the lifetime of this cache
- // entry (as defined by the 'clippedDrawBounds' parameter for GrCCPathCache::find).
- const SkIRect& hitRect() const { return fHitRect; }
-
- const GrCCCachedAtlas* cachedAtlas() const { return fCachedAtlas.get(); }
-
- const SkIRect& devIBounds() const { return fDevIBounds; }
- int width() const { return fDevIBounds.width(); }
- int height() const { return fDevIBounds.height(); }
-
- enum class ReleaseAtlasResult : bool {
- kNone,
- kDidInvalidateFromCache
- };
-
- // Called once our path has been rendered into the mainline CCPR (fp16, coverage count) atlas.
- // The caller will stash this atlas texture away after drawing, and during the next flush,
- // recover it and attempt to copy any paths that got reused into permanent 8-bit atlases.
- void setCoverageCountAtlas(
- GrOnFlushResourceProvider*, GrCCAtlas*, const SkIVector& atlasOffset,
- const GrOctoBounds& octoBounds, const SkIRect& devIBounds, const SkIVector& maskShift);
-
- // Called once our path mask has been copied into a permanent, 8-bit atlas. This method points
- // the entry at the new atlas and updates the GrCCCachedAtlas data.
- ReleaseAtlasResult upgradeToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*,
- GrCCAtlas*, const SkIVector& newAtlasOffset);
-
-private:
- using MaskTransform = GrCCPathCache::MaskTransform;
-
- GrCCPathCacheEntry(sk_sp<GrCCPathCache::Key> cacheKey, const MaskTransform& maskTransform)
- : fCacheKey(std::move(cacheKey)), fMaskTransform(maskTransform) {
- }
-
- bool hasBeenEvicted() const { return fCacheKey->shouldDeregister(); }
-
- // Resets this entry back to not having an atlas, and purges its previous atlas texture from the
- // resource cache if needed.
- ReleaseAtlasResult releaseCachedAtlas(GrCCPathCache*);
-
- sk_sp<GrCCPathCache::Key> fCacheKey;
- GrStdSteadyClock::time_point fTimestamp;
- int fHitCount = 0;
- SkIRect fHitRect = SkIRect::MakeEmpty();
-
- sk_sp<GrCCCachedAtlas> fCachedAtlas;
- SkIVector fAtlasOffset;
-
- MaskTransform fMaskTransform;
- GrOctoBounds fOctoBounds;
- SkIRect fDevIBounds;
-
- int fOnFlushRefCnt = 0;
-
- friend class GrCCPathCache;
- friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&,
- const SkPMColor4f&, GrFillRule);
-
-public:
- int testingOnly_peekOnFlushRefCnt() const;
-};
-
-/**
- * Encapsulates the data for an atlas whose texture is stored in the mainline GrResourceCache. Many
- * instances of GrCCPathCacheEntry will reference the same GrCCCachedAtlas.
- *
- * We use this object to track the percentage of the original atlas pixels that could still ever
- * potentially be reused (i.e., those which still represent an extant path). When the percentage
- * of useful pixels drops below 50%, we purge the entire texture from the resource cache.
- *
- * This object also holds a ref on the atlas's actual texture proxy during flush. When
- * fOnFlushRefCnt decrements back down to zero, we release fOnFlushProxy and reset it back to null.
- */
-class GrCCCachedAtlas : public GrNonAtomicRef<GrCCCachedAtlas> {
-public:
- using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
-
- GrCCCachedAtlas(GrCCAtlas::CoverageType type, const GrUniqueKey& textureKey,
- sk_sp<GrTextureProxy> onFlushProxy)
- : fCoverageType(type)
- , fTextureKey(textureKey)
- , fOnFlushProxy(std::move(onFlushProxy)) {}
-
- ~GrCCCachedAtlas() {
- SkASSERT(!fOnFlushProxy);
- SkASSERT(!fOnFlushRefCnt);
- }
-
- GrCCAtlas::CoverageType coverageType() const { return fCoverageType; }
- const GrUniqueKey& textureKey() const { return fTextureKey; }
-
- GrTextureProxy* getOnFlushProxy() const { return fOnFlushProxy.get(); }
-
- void setOnFlushProxy(sk_sp<GrTextureProxy> proxy) {
- SkASSERT(!fOnFlushProxy);
- fOnFlushProxy = std::move(proxy);
- }
-
- void addPathPixels(int numPixels) { fNumPathPixels += numPixels; }
- ReleaseAtlasResult invalidatePathPixels(GrCCPathCache*, int numPixels);
-
- int peekOnFlushRefCnt() const { return fOnFlushRefCnt; }
- void incrOnFlushRefCnt(int count = 1) const {
- SkASSERT(count > 0);
- SkASSERT(fOnFlushProxy);
- fOnFlushRefCnt += count;
- }
- void decrOnFlushRefCnt(int count = 1) const;
-
-private:
- const GrCCAtlas::CoverageType fCoverageType;
- const GrUniqueKey fTextureKey;
-
- int fNumPathPixels = 0;
- int fNumInvalidatedPathPixels = 0;
- bool fIsInvalidatedFromResourceCache = false;
-
- mutable sk_sp<GrTextureProxy> fOnFlushProxy;
- mutable int fOnFlushRefCnt = 0;
-
-public:
- int testingOnly_peekOnFlushRefCnt() const;
-};
-
-
-inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
- const MaskTransform& m, const GrStyledShape& shape)
- : fPathCache(pathCache)
- , fEntry(new GrCCPathCacheEntry(key, m)) {
- SkASSERT(shape.hasUnstyledKey());
- shape.addGenIDChangeListener(std::move(key));
-}
-
-inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
- const GrCCPathCache::HashNode& node) {
- return *node.entry()->fCacheKey;
-}
-
-inline GrCCPathCache::HashNode::~HashNode() {
- SkASSERT(!fEntry || fEntry->hasBeenEvicted()); // Should have called GrCCPathCache::evict().
-}
-
-inline void GrCCPathCache::HashNode::operator=(HashNode&& node) {
- SkASSERT(!fEntry || fEntry->hasBeenEvicted()); // Should have called GrCCPathCache::evict().
- fEntry = std::exchange(node.fEntry, nullptr);
-}
-
-inline void GrCCPathProcessor::Instance::set(
- const GrCCPathCacheEntry& entry, const SkIVector& shift, const SkPMColor4f& color,
- GrFillRule fillRule) {
- float dx = (float)shift.fX, dy = (float)shift.fY;
- this->set(entry.fOctoBounds.makeOffset(dx, dy), entry.fAtlasOffset - shift, color, fillRule);
-}
-
-#endif
diff --git a/src/gpu/ccpr/GrCCPathProcessor.cpp b/src/gpu/ccpr/GrCCPathProcessor.cpp
deleted file mode 100644
index 3d5d953..0000000
--- a/src/gpu/ccpr/GrCCPathProcessor.cpp
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Copyright 2017 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "src/gpu/ccpr/GrCCPathProcessor.h"
-
-#include "src/gpu/GrOnFlushResourceProvider.h"
-#include "src/gpu/GrOpsRenderPass.h"
-#include "src/gpu/GrTexture.h"
-#include "src/gpu/ccpr/GrCCPerFlushResources.h"
-#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
-#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
-#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
-#include "src/gpu/glsl/GrGLSLVarying.h"
-
-// Paths are drawn as octagons. Each point on the octagon is the intersection of two lines: one edge
-// from the path's bounding box and one edge from its 45-degree bounding box. The selectors
-// below indicate one corner from the bounding box, paired with a corner from the 45-degree bounding
-// box. The octagon vertex is the point that lies between these two corners, found by intersecting
-// their edges.
-static constexpr float kOctoEdgeNorms[8*4] = {
- // bbox // bbox45
- 0,0, 0,0,
- 0,0, 1,0,
- 1,0, 1,0,
- 1,0, 1,1,
- 1,1, 1,1,
- 1,1, 0,1,
- 0,1, 0,1,
- 0,1, 0,0,
-};
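
For reference (not part of this CL): each row above pairs one bounding-box corner selector with one 45-degree-box corner selector, and the octagon vertex is the intersection of the two edges those corners anchor. A minimal standalone sketch of that intersection, using hypothetical helper types rather than Skia's:

struct Vec2 { float x, y; };

static float dot2(Vec2 a, Vec2 b) { return a.x * b.x + a.y * b.y; }

// Intersects the line through p1 with normal n1 and the line through p2 with
// normal n2, via Cramer's rule on [n1; n2] * x = [n1.p1; n2.p2]. The vertex
// shader further below solves the same system with its K vector and inverse(N).
static Vec2 intersect_edges(Vec2 p1, Vec2 n1, Vec2 p2, Vec2 n2) {
    float k1 = dot2(n1, p1);
    float k2 = dot2(n2, p2);
    float det = n1.x * n2.y - n1.y * n2.x;  // nonzero: the paired edges are never parallel
    return {(k1 * n2.y - k2 * n1.y) / det,
            (n1.x * k2 - n2.x * k1) / det};
}
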
-
-GR_DECLARE_STATIC_UNIQUE_KEY(gVertexBufferKey);
-
-sk_sp<const GrGpuBuffer> GrCCPathProcessor::FindVertexBuffer(GrOnFlushResourceProvider* onFlushRP) {
- GR_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey);
- return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kVertex, sizeof(kOctoEdgeNorms),
- kOctoEdgeNorms, gVertexBufferKey);
-}
-
-static constexpr uint16_t kRestartStrip = 0xffff;
-
-static constexpr uint16_t kOctoIndicesAsStrips[] = {
- 3, 4, 2, 0, 1, kRestartStrip, // First half.
- 7, 0, 6, 4, 5 // Second half.
-};
-
-static constexpr uint16_t kOctoIndicesAsTris[] = {
- // First half.
- 3, 4, 2,
- 4, 0, 2,
- 2, 0, 1,
-
- // Second half.
- 7, 0, 6,
- 0, 4, 6,
- 6, 4, 5,
-};
-
-GR_DECLARE_STATIC_UNIQUE_KEY(gIndexBufferKey);
-
-constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kInstanceAttribs[];
-constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kCornersAttrib;
-
-sk_sp<const GrGpuBuffer> GrCCPathProcessor::FindIndexBuffer(GrOnFlushResourceProvider* onFlushRP) {
- GR_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
- if (onFlushRP->caps()->usePrimitiveRestart()) {
- return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
- sizeof(kOctoIndicesAsStrips), kOctoIndicesAsStrips,
- gIndexBufferKey);
- } else {
- return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
- sizeof(kOctoIndicesAsTris), kOctoIndicesAsTris,
- gIndexBufferKey);
- }
-}
-
-GrCCPathProcessor::GrCCPathProcessor(const GrTexture* atlasTexture, const GrSwizzle& swizzle,
- GrSurfaceOrigin atlasOrigin,
- const SkMatrix& viewMatrixIfUsingLocalCoords)
- : INHERITED(kGrCCPathProcessor_ClassID)
- , fAtlasAccess(GrSamplerState::Filter::kNearest, atlasTexture->backendFormat(), swizzle)
- , fAtlasDimensions(atlasTexture->dimensions())
- , fAtlasOrigin(atlasOrigin) {
- // TODO: Can we just assert that atlas has GrCCAtlas::kTextureOrigin and remove fAtlasOrigin?
- this->setInstanceAttributes(kInstanceAttribs, SK_ARRAY_COUNT(kInstanceAttribs));
- SkASSERT(this->instanceStride() == sizeof(Instance));
-
- this->setVertexAttributes(&kCornersAttrib, 1);
- this->setTextureSamplerCnt(1);
-
- if (!viewMatrixIfUsingLocalCoords.invert(&fLocalMatrix)) {
- fLocalMatrix.setIdentity();
- }
-}
-
-class GrCCPathProcessor::Impl : public GrGLSLGeometryProcessor {
-public:
- void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override;
-
- static void GenKey(const GrCCPathProcessor& cc, GrProcessorKeyBuilder* b) {
- b->add32(AddMatrixKeys(0, SkMatrix::I(), cc.fLocalMatrix));
- }
-
-private:
- void setData(const GrGLSLProgramDataManager& pdman,
- const GrPrimitiveProcessor& primProc) override {
- const auto& proc = primProc.cast<GrCCPathProcessor>();
- pdman.set2f(fAtlasAdjustUniform,
- 1.0f / proc.fAtlasDimensions.fWidth,
- 1.0f / proc.fAtlasDimensions.fHeight);
- this->setTransform(pdman, fLocalMatrixUni, proc.fLocalMatrix, &fLocalMatrix);
- }
-
- GrGLSLUniformHandler::UniformHandle fAtlasAdjustUniform;
- GrGLSLUniformHandler::UniformHandle fLocalMatrixUni;
- SkMatrix fLocalMatrix = SkMatrix::InvalidMatrix();
-
- using INHERITED = GrGLSLGeometryProcessor;
-};
-
-void GrCCPathProcessor::getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const {
- GrCCPathProcessor::Impl::GenKey(*this, b);
-}
-
-GrGLSLPrimitiveProcessor* GrCCPathProcessor::createGLSLInstance(const GrShaderCaps&) const {
- return new Impl();
-}
-
-void GrCCPathProcessor::drawPaths(GrOpFlushState* flushState, const GrPipeline& pipeline,
- const GrSurfaceProxy& atlasProxy,
- const GrCCPerFlushResources& resources, int baseInstance,
- int endInstance, const SkRect& bounds) const {
- const GrCaps& caps = flushState->caps();
- GrPrimitiveType primitiveType = caps.usePrimitiveRestart()
- ? GrPrimitiveType::kTriangleStrip
- : GrPrimitiveType::kTriangles;
- int numIndicesPerInstance = caps.usePrimitiveRestart()
- ? SK_ARRAY_COUNT(kOctoIndicesAsStrips)
- : SK_ARRAY_COUNT(kOctoIndicesAsTris);
- auto enablePrimitiveRestart = GrPrimitiveRestart(flushState->caps().usePrimitiveRestart());
-
- GrProgramInfo programInfo(flushState->writeView(), &pipeline, &GrUserStencilSettings::kUnused,
- this, primitiveType, 0, flushState->renderPassBarriers(),
- flushState->colorLoadOp());
-
- flushState->bindPipelineAndScissorClip(programInfo, bounds);
- flushState->bindTextures(*this, atlasProxy, pipeline);
- flushState->bindBuffers(resources.indexBuffer(), resources.instanceBuffer(),
- resources.vertexBuffer(), enablePrimitiveRestart);
- flushState->drawIndexedInstanced(numIndicesPerInstance, 0, endInstance - baseInstance,
- baseInstance, 0);
-}
-
-void GrCCPathProcessor::Impl::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
- using Interpolation = GrGLSLVaryingHandler::Interpolation;
-
- const GrCCPathProcessor& proc = args.fGP.cast<GrCCPathProcessor>();
- GrGLSLUniformHandler* uniHandler = args.fUniformHandler;
- GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
- GrGLSLVertexBuilder* v = args.fVertBuilder;
- GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
-
- const char* atlasAdjust;
- fAtlasAdjustUniform = uniHandler->addUniform(
- nullptr, kVertex_GrShaderFlag, kFloat2_GrSLType, "atlas_adjust", &atlasAdjust);
-
- varyingHandler->emitAttributes(proc);
-
- GrGLSLVarying texcoord(kFloat2_GrSLType);
- varyingHandler->addVarying("texcoord", &texcoord);
-
- GrGLSLVarying color(kHalf4_GrSLType);
- f->codeAppendf("half4 %s;", args.fOutputColor);
- varyingHandler->addPassThroughAttribute(
- kInstanceAttribs[kColorAttribIdx], args.fOutputColor, Interpolation::kCanBeFlat);
-
- // The vertex shader bloats and intersects the devBounds and devBounds45 rectangles, in order to
- // find an octagon that circumscribes the (bloated) path.
- // Are we clockwise? (Positive wind => nonzero fill rule.)
- // Or counter-clockwise? (negative wind => even/odd fill rule.)
- v->codeAppendf("float wind = sign(devbounds.z - devbounds.x);");
-
- // Find our reference corner from the device-space bounding box.
- v->codeAppendf("float2 refpt = mix(devbounds.xy, devbounds.zw, corners.xy);");
-
- // Find our reference corner from the 45-degree bounding box.
- v->codeAppendf("float2 refpt45 = mix(devbounds45.xy, devbounds45.zw, corners.zw);");
- // Transform back to device space.
- v->codeAppendf("refpt45 *= float2x2(+1, +1, -wind, +wind) * .5;");
-
- // Find the normals to each edge, then intersect them to find our octagon vertex.
- v->codeAppendf("float2x2 N = float2x2("
- "corners.z + corners.w - 1, corners.w - corners.z, "
- "corners.xy*2 - 1);");
- v->codeAppendf("N = float2x2(wind, 0, 0, 1) * N;");
- v->codeAppendf("float2 K = float2(dot(N[0], refpt), dot(N[1], refpt45));");
- v->codeAppendf("float2 octocoord = K * inverse(N);");
-
- // Round the octagon out to ensure we rasterize every pixel the path might touch. (Positive
- // bloatdir means we should take the "ceil" and negative means to take the "floor".)
- //
- // NOTE: If we were just drawing a rect, ceil/floor would be enough. But since there are also
- // diagonals in the octagon that cross through pixel centers, we need to outset by another
- // quarter px to ensure those pixels get rasterized.
- v->codeAppendf("float2 bloatdir = (0 != N[0].x) "
- "? float2(N[0].x, N[1].y)"
- ": float2(N[1].x, N[0].y);");
- v->codeAppendf("octocoord = (ceil(octocoord * bloatdir - 1e-4) + 0.25) * bloatdir;");
- v->codeAppendf("float2 atlascoord = octocoord + float2(dev_to_atlas_offset);");
-
- // Convert to atlas coordinates in order to do our texture lookup.
- if (kTopLeft_GrSurfaceOrigin == proc.fAtlasOrigin) {
- v->codeAppendf("%s.xy = atlascoord * %s;", texcoord.vsOut(), atlasAdjust);
- } else {
- SkASSERT(kBottomLeft_GrSurfaceOrigin == proc.fAtlasOrigin);
- v->codeAppendf("%s.xy = float2(atlascoord.x * %s.x, 1 - atlascoord.y * %s.y);",
- texcoord.vsOut(), atlasAdjust, atlasAdjust);
- }
-
- gpArgs->fPositionVar.set(kFloat2_GrSLType, "octocoord");
- this->writeLocalCoord(v, args.fUniformHandler, gpArgs, gpArgs->fPositionVar, proc.fLocalMatrix,
- &fLocalMatrixUni);
-
- // Fragment shader. Look up coverage in the atlas.
- f->codeAppendf("half coverage = ");
- f->appendTextureLookup(args.fTexSamplers[0], SkStringPrintf("%s.xy", texcoord.fsIn()).c_str());
- f->codeAppendf(".a;");
-
- f->codeAppendf("half4 %s = half4(coverage);", args.fOutputCoverage);
-}
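
As a standalone illustration (not part of this CL): the round-out above can be checked pointwise. A sketch of the same arithmetic as an assumed free function, not Skia API:

#include <cmath>

// Mirrors the "octocoord" round-out in the deleted shader: positive bloatDir
// takes the ceil, negative takes the floor, then pushes a further quarter
// pixel outward so diagonals that cross pixel centers still rasterize.
static float bloat_coord(float octocoord, float bloatDir /* +1 or -1 */) {
    return (std::ceil(octocoord * bloatDir - 1e-4f) + 0.25f) * bloatDir;
}

// e.g. bloat_coord(3.4f, +1) == 4.25f, and bloat_coord(3.4f, -1) == 2.75f
// (floor(3.4) minus a quarter pixel).
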
diff --git a/src/gpu/ccpr/GrCCPathProcessor.h b/src/gpu/ccpr/GrCCPathProcessor.h
deleted file mode 100644
index df6f6ea..0000000
--- a/src/gpu/ccpr/GrCCPathProcessor.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2017 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrCCPathProcessor_DEFINED
-#define GrCCPathProcessor_DEFINED
-
-#include <array>
-#include "include/core/SkPath.h"
-#include "src/gpu/GrCaps.h"
-#include "src/gpu/GrGeometryProcessor.h"
-#include "src/gpu/GrPipeline.h"
-#include "src/gpu/ccpr/GrOctoBounds.h"
-
-class GrCCPathCacheEntry;
-class GrCCPerFlushResources;
-class GrOnFlushResourceProvider;
-class GrOpFlushState;
-
-/**
- * This class draws AA paths using the coverage count masks produced by GrCCCoverageProcessor.
- *
- * Paths are drawn as bloated octagons, and coverage is derived from the coverage count mask and
- * fill rule.
- *
- * To draw paths, the caller must set up an instance buffer as detailed below, then call drawPaths()
- * providing its own instance buffer alongside the buffers found by calling FindIndexBuffer/
- * FindVertexBuffer.
- */
-class GrCCPathProcessor : public GrGeometryProcessor {
-public:
- struct Instance {
- SkRect fDevBounds; // "right < left" indicates even-odd fill type.
- SkRect fDevBounds45; // Bounding box in "| 1 -1 | * devCoords" space. See GrOctoBounds.
- // | 1 1 |
- SkIVector fDevToAtlasOffset; // Translation from device space to location in atlas.
- SkPMColor4f fColor; // Color always stored as 4 x fp32
-
- void set(const GrOctoBounds&, const SkIVector& devToAtlasOffset, const SkPMColor4f&,
- GrFillRule);
- void set(const GrCCPathCacheEntry&, const SkIVector& shift, const SkPMColor4f&, GrFillRule);
- };
-
- static_assert(4 * 14 == sizeof(Instance));
-
- static sk_sp<const GrGpuBuffer> FindVertexBuffer(GrOnFlushResourceProvider*);
- static sk_sp<const GrGpuBuffer> FindIndexBuffer(GrOnFlushResourceProvider*);
-
- GrCCPathProcessor(const GrTexture* atlasTexture, const GrSwizzle&, GrSurfaceOrigin atlasOrigin,
- const SkMatrix& viewMatrixIfUsingLocalCoords = SkMatrix::I());
-
- const char* name() const override { return "GrCCPathProcessor"; }
- void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder* b) const override;
-
- GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override;
-
- void drawPaths(GrOpFlushState*, const GrPipeline&, const GrSurfaceProxy& atlasProxy,
- const GrCCPerFlushResources&, int baseInstance, int endInstance,
- const SkRect& bounds) const;
-
-private:
- const TextureSampler& onTextureSampler(int) const override { return fAtlasAccess; }
-
- const TextureSampler fAtlasAccess;
- SkISize fAtlasDimensions;
- GrSurfaceOrigin fAtlasOrigin;
-
- SkMatrix fLocalMatrix;
- static constexpr Attribute kInstanceAttribs[] = {
- {"devbounds", kFloat4_GrVertexAttribType, kFloat4_GrSLType},
- {"devbounds45", kFloat4_GrVertexAttribType, kFloat4_GrSLType},
- {"dev_to_atlas_offset", kInt2_GrVertexAttribType, kInt2_GrSLType},
- {"color", kFloat4_GrVertexAttribType, kHalf4_GrSLType}
- };
- static constexpr int kColorAttribIdx = 3;
- static constexpr Attribute kCornersAttrib =
- {"corners", kFloat4_GrVertexAttribType, kFloat4_GrSLType};
-
- class Impl;
-
- using INHERITED = GrGeometryProcessor;
-};
-
-inline void GrCCPathProcessor::Instance::set(
- const GrOctoBounds& octoBounds, const SkIVector& devToAtlasOffset, const SkPMColor4f& color,
- GrFillRule fillRule) {
- if (GrFillRule::kNonzero == fillRule) {
- // We cover "nonzero" paths with clockwise triangles, which is the default result from
- // normal octo bounds.
- fDevBounds = octoBounds.bounds();
- fDevBounds45 = octoBounds.bounds45();
- } else {
- // We cover "even/odd" paths with counterclockwise triangles. Here we reorder the bounding
- // box vertices so the output is flipped horizontally.
- fDevBounds.setLTRB(
- octoBounds.right(), octoBounds.top(), octoBounds.left(), octoBounds.bottom());
- fDevBounds45.setLTRB(
- octoBounds.bottom45(), octoBounds.right45(), octoBounds.top45(),
- octoBounds.left45());
- }
- fDevToAtlasOffset = devToAtlasOffset;
- fColor = color;
-}
-
-#endif
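
For reference (not part of this CL): the even/odd reordering in Instance::set above amounts to a horizontal mirror. A one-function sketch of the convention, illustrative only:

#include "include/core/SkRect.h"

// "right < left" flags even/odd fill: mirroring the rect negates the shader's
// wind value, which selects the counterclockwise cover geometry.
static SkRect flip_horizontally(const SkRect& b) {
    return SkRect::MakeLTRB(b.fRight, b.fTop, b.fLeft, b.fBottom);
}
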
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.cpp b/src/gpu/ccpr/GrCCPerFlushResources.cpp
index 5ffb194..a48c3fc 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.cpp
+++ b/src/gpu/ccpr/GrCCPerFlushResources.cpp
@@ -12,15 +12,12 @@
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrSurfaceDrawContext.h"
-#include "src/gpu/ccpr/GrCCPathCache.h"
#include "src/gpu/ccpr/GrSampleMaskProcessor.h"
#include "src/gpu/geometry/GrStyledShape.h"
#include <algorithm>
-using CoverageType = GrCCAtlas::CoverageType;
using FillBatchID = GrCCFiller::BatchID;
-using PathInstance = GrCCPathProcessor::Instance;
namespace {
@@ -59,58 +56,6 @@
void onPrepare(GrOpFlushState*) final {}
};
-// Copies paths from a cached coverage count or msaa atlas into an 8-bit literal-coverage atlas.
-class CopyAtlasOp : public AtlasOp {
-public:
- DEFINE_OP_CLASS_ID
-
- static GrOp::Owner Make(
- GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
- sk_sp<GrTextureProxy> copyProxy, int baseInstance, int endInstance,
- const SkISize& drawBounds) {
- return GrOp::Make<CopyAtlasOp>(
- context, std::move(resources), std::move(copyProxy), baseInstance,
- endInstance, drawBounds);
- }
-
- const char* name() const override { return "CopyAtlasOp (CCPR)"; }
-
- void visitProxies(const VisitProxyFunc& fn) const override {
- fn(fSrcProxy.get(), GrMipmapped::kNo);
- }
-
- void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
- SkASSERT(fSrcProxy);
- SkASSERT(fSrcProxy->isInstantiated());
-
- GrColorType ct = GrCCAtlas::CoverageTypeToColorType(fResources->renderedPathCoverageType());
- GrSwizzle swizzle = flushState->caps().getReadSwizzle(fSrcProxy->backendFormat(), ct);
- GrCCPathProcessor pathProc(fSrcProxy->peekTexture(), swizzle, GrCCAtlas::kTextureOrigin);
-
- bool hasScissor = flushState->appliedClip() &&
- flushState->appliedClip()->scissorState().enabled();
- GrPipeline pipeline(hasScissor ? GrScissorTest::kEnabled : GrScissorTest::kDisabled,
- SkBlendMode::kSrc, flushState->drawOpArgs().writeView().swizzle());
-
- pathProc.drawPaths(flushState, pipeline, *fSrcProxy, *fResources, fBaseInstance,
- fEndInstance, this->bounds());
- }
-
-private:
- friend class ::GrOp; // for ctor
-
- CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> srcProxy,
- int baseInstance, int endInstance, const SkISize& drawBounds)
- : AtlasOp(ClassID(), std::move(resources), drawBounds)
- , fSrcProxy(srcProxy)
- , fBaseInstance(baseInstance)
- , fEndInstance(endInstance) {
- }
- sk_sp<GrTextureProxy> fSrcProxy;
- const int fBaseInstance;
- const int fEndInstance;
-};
-
// Renders coverage counts to a CCPR atlas using the resources' pre-filled GrCCPathParser.
template<typename ProcessorType> class RenderAtlasOp : public AtlasOp {
public:
@@ -149,252 +94,23 @@
} // namespace
-static int inst_buffer_count(const GrCCPerFlushResourceSpecs& specs) {
- return specs.fNumCachedPaths +
- // Copies get two instances per draw: 1 copy + 1 draw.
- (specs.fNumCopiedPaths) * 2 +
- specs.fNumRenderedPaths;
- // No clips in instance buffers.
-}
-
-GrCCPerFlushResources::GrCCPerFlushResources(
- GrOnFlushResourceProvider* onFlushRP, CoverageType coverageType,
- const GrCCPerFlushResourceSpecs& specs)
- // Overallocate by one point so we can call Sk4f::Store at the final SkPoint in the array.
- // (See transform_path_pts below.)
- // FIXME: instead use built-in instructions to write only the first two lanes of an Sk4f.
- : fLocalDevPtsBuffer(specs.fRenderedPathStats.fMaxPointsPerPath + 1)
- , fFiller(GrCCFiller::Algorithm::kStencilWindingCount,
- specs.fNumRenderedPaths + specs.fNumClipPaths,
+GrCCPerFlushResources::GrCCPerFlushResources(GrOnFlushResourceProvider* onFlushRP,
+ const GrCCPerFlushResourceSpecs& specs)
+ : fFiller(GrCCFiller::Algorithm::kStencilWindingCount,
+ specs.fNumClipPaths,
specs.fRenderedPathStats.fNumTotalSkPoints,
specs.fRenderedPathStats.fNumTotalSkVerbs,
specs.fRenderedPathStats.fNumTotalConicWeights)
- , fCopyAtlasStack(CoverageType::kA8_LiteralCoverage, specs.fCopyAtlasSpecs,
- onFlushRP->caps())
- , fRenderedAtlasStack(coverageType, specs.fRenderedAtlasSpecs, onFlushRP->caps())
- , fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
- , fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
- , fNextCopyInstanceIdx(0)
- , fNextPathInstanceIdx(specs.fNumCopiedPaths) {
- if (!fIndexBuffer) {
- SkDebugf("WARNING: failed to allocate CCPR index buffer. No paths will be drawn.\n");
+ , fRenderedAtlasStack(specs.fRenderedAtlasSpecs, onFlushRP->caps()) {
+ int numRenderedPaths = specs.fNumClipPaths;
+ fStencilResolveBuffer.resetAndMapBuffer(
+ onFlushRP, numRenderedPaths * sizeof(GrStencilAtlasOp::ResolveRectInstance));
+ if (!fStencilResolveBuffer.hasGpuBuffer()) {
+ SkDebugf("WARNING: failed to allocate CCPR stencil resolve buffer. "
+ "No paths will be drawn.\n");
return;
}
- if (!fVertexBuffer) {
- SkDebugf("WARNING: failed to allocate CCPR vertex buffer. No paths will be drawn.\n");
- return;
- }
- fPathInstanceBuffer.resetAndMapBuffer(onFlushRP,
- inst_buffer_count(specs) * sizeof(PathInstance));
- if (!fPathInstanceBuffer.hasGpuBuffer()) {
- SkDebugf("WARNING: failed to allocate CCPR instance buffer. No paths will be drawn.\n");
- return;
- }
-
- if (CoverageType::kA8_Multisample == coverageType) {
- int numRenderedPaths = specs.fNumRenderedPaths + specs.fNumClipPaths;
- fStencilResolveBuffer.resetAndMapBuffer(
- onFlushRP, numRenderedPaths * sizeof(GrStencilAtlasOp::ResolveRectInstance));
- if (!fStencilResolveBuffer.hasGpuBuffer()) {
- SkDebugf("WARNING: failed to allocate CCPR stencil resolve buffer. "
- "No paths will be drawn.\n");
- return;
- }
- SkDEBUGCODE(fEndStencilResolveInstance = numRenderedPaths);
- }
-
- SkDEBUGCODE(fEndCopyInstance = specs.fNumCopiedPaths);
- SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
-}
-
-void GrCCPerFlushResources::upgradeEntryToLiteralCoverageAtlas(
- GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCPathCacheEntry* entry,
- GrFillRule fillRule) {
- using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
- SkASSERT(this->isMapped());
- SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);
-
- const GrCCCachedAtlas* cachedAtlas = entry->cachedAtlas();
- SkASSERT(cachedAtlas);
- SkASSERT(cachedAtlas->getOnFlushProxy());
-
- if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
- // This entry has already been upgraded to literal coverage. The path must have been drawn
- // multiple times during the flush.
- SkDEBUGCODE(--fEndCopyInstance);
- return;
- }
-
- SkIVector newAtlasOffset;
- if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry->devIBounds(), &newAtlasOffset)) {
- // We did not fit in the previous copy atlas and it was retired. We will render the ranges
- // up until fCopyPathRanges.count() into the retired atlas during finalize().
- retiredAtlas->setFillBatchID(fCopyPathRanges.count());
- fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
- }
-
- this->recordCopyPathInstance(
- *entry, newAtlasOffset, fillRule, sk_ref_sp(cachedAtlas->getOnFlushProxy()));
-
- sk_sp<GrTexture> previousAtlasTexture =
- sk_ref_sp(cachedAtlas->getOnFlushProxy()->peekTexture());
- GrCCAtlas* newAtlas = &fCopyAtlasStack.current();
- if (ReleaseAtlasResult::kDidInvalidateFromCache ==
- entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, newAtlas, newAtlasOffset)) {
- // This texture just got booted out of the cache. Keep it around, in case we might be able
- // to recycle it for a new atlas. We can recycle it because copying happens before rendering
- // new paths, and every path from the atlas that we're planning to use this flush will be
- // copied to a new atlas. We'll never copy some and leave others.
- fRecyclableAtlasTextures.push_back(std::move(previousAtlasTexture));
- }
-}
-
-void GrCCPerFlushResources::recordCopyPathInstance(
- const GrCCPathCacheEntry& entry, const SkIVector& newAtlasOffset, GrFillRule fillRule,
- sk_sp<GrTextureProxy> srcProxy) {
- SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);
-
- // Write the instance at the back of the array.
- int currentInstanceIdx = fNextCopyInstanceIdx++;
- fPathInstanceBuffer[currentInstanceIdx].set(entry, newAtlasOffset, SK_PMColor4fWHITE, fillRule);
-
- // Percolate the instance forward until it's contiguous with other instances that share the same
- // proxy.
- for (int i = fCopyPathRanges.count() - 1; i >= fCurrCopyAtlasRangesIdx; --i) {
- if (fCopyPathRanges[i].fSrcProxy == srcProxy) {
- ++fCopyPathRanges[i].fCount;
- return;
- }
- int rangeFirstInstanceIdx = currentInstanceIdx - fCopyPathRanges[i].fCount;
- std::swap(fPathInstanceBuffer[rangeFirstInstanceIdx],
- fPathInstanceBuffer[currentInstanceIdx]);
- currentInstanceIdx = rangeFirstInstanceIdx;
- }
-
- // An instance with this particular proxy did not yet exist in the array. Add a range for it,
- // first moving any later ranges back to make space for it at fCurrCopyAtlasRangesIdx.
- fCopyPathRanges.push_back();
- std::move_backward(fCopyPathRanges.begin() + fCurrCopyAtlasRangesIdx,
- fCopyPathRanges.end() - 1,
- fCopyPathRanges.end());
- fCopyPathRanges[fCurrCopyAtlasRangesIdx] = {std::move(srcProxy), 1};
-}
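
For reference (not part of this CL): the percolation above keeps instances that share a source proxy contiguous without ever sorting the buffer. A simplified standalone model of the same scheme, with int keys standing in for proxies:

#include <utility>
#include <vector>

struct Range { int key; int count; };  // a contiguous run of equal-key items

// Appends 'value', then swaps it backward across whole ranges until it either
// joins the range with a matching key or a new range is created at the front.
// Order within a range may shuffle, but ranges always stay contiguous.
static void insert_grouped(std::vector<int>& items, std::vector<Range>& ranges,
                           int key, int value) {
    items.push_back(value);
    int idx = (int)items.size() - 1;
    for (int i = (int)ranges.size() - 1; i >= 0; --i) {
        if (ranges[i].key == key) {
            ++ranges[i].count;
            return;
        }
        int rangeFirst = idx - ranges[i].count;
        std::swap(items[rangeFirst], items[idx]);  // hop over this range
        idx = rangeFirst;
    }
    ranges.insert(ranges.begin(), Range{key, 1});
}
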
-
-static bool transform_path_pts(
- const SkMatrix& m, const SkPath& path, const SkAutoSTArray<32, SkPoint>& outDevPts,
- GrOctoBounds* octoBounds) {
- const SkPoint* pts = SkPathPriv::PointData(path);
- int numPts = path.countPoints();
- SkASSERT(numPts + 1 <= outDevPts.count());
- SkASSERT(numPts);
-
- // m45 transforms path points into "45 degree" device space. A bounding box in this space gives
- // the circumscribing octagon's diagonals. We could use SK_ScalarRoot2Over2, but an orthonormal
- // transform is not necessary as long as the shader uses the correct inverse.
- SkMatrix m45;
- m45.setSinCos(1, 1);
- m45.preConcat(m);
-
- // X,Y,T pack two view matrices, applied in parallel lanes, that accumulate two bounding boxes:
- // device-space bounds and "45 degree" device-space bounds (| 1 -1 | * devCoords).
- // | 1 1 |
- Sk4f X = Sk4f(m.getScaleX(), m.getSkewY(), m45.getScaleX(), m45.getSkewY());
- Sk4f Y = Sk4f(m.getSkewX(), m.getScaleY(), m45.getSkewX(), m45.getScaleY());
- Sk4f T = Sk4f(m.getTranslateX(), m.getTranslateY(), m45.getTranslateX(), m45.getTranslateY());
-
- // Map the path's points to device space and accumulate bounding boxes.
- Sk4f devPt = SkNx_fma(Y, Sk4f(pts[0].y()), T);
- devPt = SkNx_fma(X, Sk4f(pts[0].x()), devPt);
- Sk4f topLeft = devPt;
- Sk4f bottomRight = devPt;
-
- // Store all 4 values [dev.x, dev.y, dev45.x, dev45.y]. We are only interested in the first two,
- // and will overwrite [dev45.x, dev45.y] with the next point. This is why the dst buffer must
- // be at least one larger than the number of points.
- devPt.store(&outDevPts[0]);
-
- for (int i = 1; i < numPts; ++i) {
- devPt = SkNx_fma(Y, Sk4f(pts[i].y()), T);
- devPt = SkNx_fma(X, Sk4f(pts[i].x()), devPt);
- topLeft = Sk4f::Min(topLeft, devPt);
- bottomRight = Sk4f::Max(bottomRight, devPt);
- devPt.store(&outDevPts[i]);
- }
-
- if (!(Sk4f(0) == topLeft*0).allTrue() || !(Sk4f(0) == bottomRight*0).allTrue()) {
- // The bounds are infinite or NaN.
- return false;
- }
-
- SkPoint topLeftPts[2], bottomRightPts[2];
- topLeft.store(topLeftPts);
- bottomRight.store(bottomRightPts);
-
- const SkRect& devBounds = SkRect::MakeLTRB(
- topLeftPts[0].x(), topLeftPts[0].y(), bottomRightPts[0].x(), bottomRightPts[0].y());
- const SkRect& devBounds45 = SkRect::MakeLTRB(
- topLeftPts[1].x(), topLeftPts[1].y(), bottomRightPts[1].x(), bottomRightPts[1].y());
-
- octoBounds->set(devBounds, devBounds45);
- return true;
-}
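
For reference (not part of this CL): the m45 mapping above, written pointwise as a standalone sketch:

#include "include/core/SkPoint.h"

// The "45 degree" space used by the deleted helper: a bounding box of these
// coordinates gives the circumscribing octagon's diagonals. Deliberately
// non-orthonormal; the shader applied the matching inverse.
static SkPoint to_45_space(SkPoint dev) {
    return {dev.fX - dev.fY,   // | 1 -1 | * devCoords
            dev.fX + dev.fY};  // | 1  1 |
}
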
-
-GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
- const SkIRect& clipIBounds, const SkMatrix& m, const GrStyledShape& shape,
- float strokeDevWidth, GrOctoBounds* octoBounds, SkIRect* devIBounds,
- SkIVector* devToAtlasOffset) {
- SkASSERT(this->isMapped());
- SkASSERT(fNextPathInstanceIdx < fEndPathInstance);
-
- SkPath path;
- shape.asPath(&path);
- if (path.isEmpty()) {
- SkDEBUGCODE(--fEndPathInstance);
- SkDEBUGCODE(--fEndStencilResolveInstance);
- return nullptr;
- }
- if (!transform_path_pts(m, path, fLocalDevPtsBuffer, octoBounds)) {
- // The transformed path had infinite or NaN bounds.
- SkDEBUGCODE(--fEndPathInstance);
- SkDEBUGCODE(--fEndStencilResolveInstance);
- return nullptr;
- }
-
- const SkStrokeRec& stroke = shape.style().strokeRec();
- if (!stroke.isFillStyle()) {
- float r = SkStrokeRec::GetInflationRadius(
- stroke.getJoin(), stroke.getMiter(), stroke.getCap(), strokeDevWidth);
- octoBounds->outset(r);
- }
-
- GrScissorTest enableScissorInAtlas;
- if (clipIBounds.contains(octoBounds->bounds())) {
- enableScissorInAtlas = GrScissorTest::kDisabled;
- } else if (octoBounds->clip(clipIBounds)) {
- enableScissorInAtlas = GrScissorTest::kEnabled;
- } else {
- // The clip and octo bounds do not intersect. Draw nothing.
- SkDEBUGCODE(--fEndPathInstance);
- SkDEBUGCODE(--fEndStencilResolveInstance);
- return nullptr;
- }
- octoBounds->roundOut(devIBounds);
- SkASSERT(clipIBounds.contains(*devIBounds));
-
- this->placeRenderedPathInAtlas(*devIBounds, enableScissorInAtlas, devToAtlasOffset);
-
- GrFillRule fillRule;
- SkASSERT(stroke.isFillStyle());
- SkASSERT(0 == strokeDevWidth);
- fFiller.parseDeviceSpaceFill(path, fLocalDevPtsBuffer.begin(), enableScissorInAtlas,
- *devIBounds, *devToAtlasOffset);
- fillRule = GrFillRuleForSkPath(path);
-
- if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
- this->recordStencilResolveInstance(*devIBounds, *devToAtlasOffset, fillRule);
- }
-
- return &fRenderedAtlasStack.current();
+ SkDEBUGCODE(fEndStencilResolveInstance = numRenderedPaths);
}
const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
@@ -426,9 +142,7 @@
// In MSAA mode we also record an internal draw instance that will be used to resolve stencil
// winding values to coverage when the atlas is generated.
- if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
- this->recordStencilResolveInstance(clippedPathIBounds, *devToAtlasOffset, fillRule);
- }
+ this->recordStencilResolveInstance(clippedPathIBounds, *devToAtlasOffset, fillRule);
return &fRenderedAtlasStack.current();
}
@@ -447,7 +161,6 @@
void GrCCPerFlushResources::recordStencilResolveInstance(
const SkIRect& clippedPathIBounds, const SkIVector& devToAtlasOffset, GrFillRule fillRule) {
- SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType());
SkASSERT(fNextStencilResolveInstanceIdx < fEndStencilResolveInstance);
SkIRect atlasIBounds = clippedPathIBounds.makeOffset(devToAtlasOffset);
@@ -463,21 +176,12 @@
bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP) {
SkASSERT(this->isMapped());
- SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
- SkASSERT(fNextCopyInstanceIdx == fEndCopyInstance);
- SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
- fNextStencilResolveInstanceIdx == fEndStencilResolveInstance);
-
- fPathInstanceBuffer.unmapBuffer();
+ SkASSERT(fNextStencilResolveInstanceIdx == fEndStencilResolveInstance);
if (fStencilResolveBuffer.hasGpuBuffer()) {
fStencilResolveBuffer.unmapBuffer();
}
- if (!fCopyAtlasStack.empty()) {
- fCopyAtlasStack.current().setFillBatchID(fCopyPathRanges.count());
- fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
- }
if (!fRenderedAtlasStack.empty()) {
fRenderedAtlasStack.current().setFillBatchID(fFiller.closeCurrentBatch());
fRenderedAtlasStack.current().setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
@@ -489,47 +193,11 @@
return false;
}
- // Draw the copies from coverage count or msaa atlas(es) into 8-bit cached atlas(es).
- int copyRangeIdx = 0;
- int baseCopyInstance = 0;
- for (GrCCAtlas& atlas : fCopyAtlasStack.atlases()) {
- int endCopyRange = atlas.getFillBatchID();
- SkASSERT(endCopyRange > copyRangeIdx);
-
- auto rtc = atlas.instantiate(onFlushRP);
- for (; copyRangeIdx < endCopyRange; ++copyRangeIdx) {
- const CopyPathRange& copyRange = fCopyPathRanges[copyRangeIdx];
- int endCopyInstance = baseCopyInstance + copyRange.fCount;
- if (rtc) {
- auto op = CopyAtlasOp::Make(rtc->recordingContext(), sk_ref_sp(this),
- copyRange.fSrcProxy, baseCopyInstance, endCopyInstance,
- atlas.drawBounds());
- rtc->addDrawOp(nullptr, std::move(op));
- }
- baseCopyInstance = endCopyInstance;
- }
- }
- SkASSERT(fCopyPathRanges.count() == copyRangeIdx);
- SkASSERT(fNextCopyInstanceIdx == baseCopyInstance);
- SkASSERT(baseCopyInstance == fEndCopyInstance);
-
// Render the coverage count atlas(es).
int baseStencilResolveInstance = 0;
for (GrCCAtlas& atlas : fRenderedAtlasStack.atlases()) {
- // Copies will be finished by the time we get to rendering new atlases. See if we can
- // recycle any previous invalidated atlas textures instead of creating new ones.
- sk_sp<GrTexture> backingTexture;
- for (sk_sp<GrTexture>& texture : fRecyclableAtlasTextures) {
- if (texture && atlas.currentHeight() == texture->height() &&
- atlas.currentWidth() == texture->width()) {
- backingTexture = std::exchange(texture, nullptr);
- break;
- }
- }
-
- if (auto rtc = atlas.instantiate(onFlushRP, std::move(backingTexture))) {
+ if (auto rtc = atlas.instantiate(onFlushRP)) {
GrOp::Owner op;
- SkASSERT(CoverageType::kA8_Multisample == fRenderedAtlasStack.coverageType());
op = GrStencilAtlasOp::Make(
rtc->recordingContext(), sk_ref_sp(this), atlas.getFillBatchID(),
baseStencilResolveInstance,
@@ -544,16 +212,7 @@
SkASSERT(atlas.getEndStencilResolveInstance() >= baseStencilResolveInstance);
baseStencilResolveInstance = atlas.getEndStencilResolveInstance();
}
- SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
- baseStencilResolveInstance == fEndStencilResolveInstance);
+ SkASSERT(baseStencilResolveInstance == fEndStencilResolveInstance);
return true;
}
-
-void GrCCPerFlushResourceSpecs::cancelCopies() {
- // Convert copies to cached draws.
- fNumCachedPaths += fNumCopiedPaths;
- fNumCopiedPaths = 0;
- fCopyPathStats = GrCCRenderedPathStats();
- fCopyAtlasSpecs = GrCCAtlas::Specs();
-}
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.h b/src/gpu/ccpr/GrCCPerFlushResources.h
index 6a5f992..81c602f 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.h
+++ b/src/gpu/ccpr/GrCCPerFlushResources.h
@@ -12,7 +12,6 @@
#include "src/gpu/ccpr/GrAutoMapVertexBuffer.h"
#include "src/gpu/ccpr/GrCCAtlas.h"
#include "src/gpu/ccpr/GrCCFiller.h"
-#include "src/gpu/ccpr/GrCCPathProcessor.h"
#include "src/gpu/ccpr/GrStencilAtlasOp.h"
class GrCCPathCache;
@@ -38,22 +37,13 @@
* CCPR in a given flush.
*/
struct GrCCPerFlushResourceSpecs {
- int fNumCachedPaths = 0;
-
- int fNumCopiedPaths = 0;
- GrCCRenderedPathStats fCopyPathStats;
- GrCCAtlas::Specs fCopyAtlasSpecs;
-
- int fNumRenderedPaths = 0;
int fNumClipPaths = 0;
GrCCRenderedPathStats fRenderedPathStats;
GrCCAtlas::Specs fRenderedAtlasSpecs;
bool isEmpty() const {
- return 0 == fNumCachedPaths + fNumCopiedPaths + fNumRenderedPaths + fNumClipPaths;
+ return 0 == fNumClipPaths;
}
- // Converts the copies to normal cached draws.
- void cancelCopies();
};
/**
@@ -64,72 +54,26 @@
class GrCCPerFlushResources : public GrNonAtomicRef<GrCCPerFlushResources> {
public:
GrCCPerFlushResources(
- GrOnFlushResourceProvider*, GrCCAtlas::CoverageType, const GrCCPerFlushResourceSpecs&);
+ GrOnFlushResourceProvider*, const GrCCPerFlushResourceSpecs&);
- bool isMapped() const { return fPathInstanceBuffer.isMapped(); }
+ bool isMapped() const { return fStencilResolveBuffer.isMapped(); }
- GrCCAtlas::CoverageType renderedPathCoverageType() const {
- return fRenderedAtlasStack.coverageType();
- }
-
- // Copies a coverage-counted path out of the given texture proxy, and into a cached, 8-bit,
- // literal coverage atlas. Updates the cache entry to reference the new atlas.
- void upgradeEntryToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*,
- GrCCPathCacheEntry*, GrFillRule);
-
- // An expected copy-to-literal was canceled due to cache entry eviction within a flush cycle.
- SkDEBUGCODE(void cancelEvictedDoCopies() { fEndCopyInstance--; })
-
- // These two methods render a path into a temporary coverage count atlas. See
- // GrCCPathProcessor::Instance for a description of the outputs.
- //
- // strokeDevWidth must be 0 for fills, 1 for hairlines, or the stroke width in device-space
- // pixels for non-hairline strokes (implicitly requiring a rigid-body transform).
- GrCCAtlas* renderShapeInAtlas(
- const SkIRect& clipIBounds, const SkMatrix&, const GrStyledShape&, float strokeDevWidth,
- GrOctoBounds*, SkIRect* devIBounds, SkIVector* devToAtlasOffset);
+ // Renders a path into an atlas.
const GrCCAtlas* renderDeviceSpacePathInAtlas(
const SkIRect& clipIBounds, const SkPath& devPath, const SkIRect& devPathIBounds,
GrFillRule fillRule, SkIVector* devToAtlasOffset);
- // Returns the index in instanceBuffer() of the next instance that will be added by
- // appendDrawPathInstance().
- int nextPathInstanceIdx() const { return fNextPathInstanceIdx; }
-
- // Appends an instance to instanceBuffer() that will draw a path to the destination render
- // target. The caller is responsible to call set() on the returned instance, to keep track of
- // its atlas and index (see nextPathInstanceIdx()), and to issue the actual draw call.
- GrCCPathProcessor::Instance& appendDrawPathInstance() {
- SkASSERT(this->isMapped());
- SkASSERT(fNextPathInstanceIdx < fEndPathInstance);
- return fPathInstanceBuffer[fNextPathInstanceIdx++];
- }
-
// Finishes off the GPU buffers and renders the atlas(es).
bool finalize(GrOnFlushResourceProvider*);
// Accessors used by draw calls, once the resources have been finalized.
const GrCCFiller& filler() const { SkASSERT(!this->isMapped()); return fFiller; }
- sk_sp<const GrGpuBuffer> indexBuffer() const {
- SkASSERT(!this->isMapped());
- return fIndexBuffer;
- }
- sk_sp<const GrGpuBuffer> instanceBuffer() const {
- SkASSERT(!this->isMapped());
- return fPathInstanceBuffer.gpuBuffer();
- }
- sk_sp<const GrGpuBuffer> vertexBuffer() const {
- SkASSERT(!this->isMapped());
- return fVertexBuffer;
- }
sk_sp<const GrGpuBuffer> stencilResolveBuffer() const {
SkASSERT(!this->isMapped());
return fStencilResolveBuffer.gpuBuffer();
}
private:
- void recordCopyPathInstance(const GrCCPathCacheEntry&, const SkIVector& newAtlasOffset,
- GrFillRule, sk_sp<GrTextureProxy> srcProxy);
void placeRenderedPathInAtlas(
const SkIRect& clippedPathIBounds, GrScissorTest, SkIVector* devToAtlasOffset);
@@ -138,38 +82,9 @@
void recordStencilResolveInstance(
const SkIRect& clippedPathIBounds, const SkIVector& devToAtlasOffset, GrFillRule);
- const SkAutoSTArray<32, SkPoint> fLocalDevPtsBuffer;
GrCCFiller fFiller;
- GrCCAtlasStack fCopyAtlasStack;
GrCCAtlasStack fRenderedAtlasStack;
- const sk_sp<const GrGpuBuffer> fIndexBuffer;
- const sk_sp<const GrGpuBuffer> fVertexBuffer;
-
- GrTAutoMapVertexBuffer<GrCCPathProcessor::Instance> fPathInstanceBuffer;
- int fNextCopyInstanceIdx;
- SkDEBUGCODE(int fEndCopyInstance);
- int fNextPathInstanceIdx;
- int fBasePathInstanceIdx;
- SkDEBUGCODE(int fEndPathInstance);
-
- // Represents a range of copy-path instances that all share the same source proxy. (i.e. Draw
- // instances that copy a path mask from a 16-bit coverage count atlas into an 8-bit literal
- // coverage atlas.)
- struct CopyPathRange {
- sk_sp<GrTextureProxy> fSrcProxy;
- int fCount;
- };
-
- SkSTArray<4, CopyPathRange> fCopyPathRanges;
- int fCurrCopyAtlasRangesIdx = 0;
-
- // This is a list of coverage count atlas textures that have been invalidated due to us copying
- // their paths into new 8-bit literal coverage atlases. Since copying is finished by the time
- // we begin rendering new atlases, we can recycle these textures for the rendered atlases rather
- // than allocating new texture objects upon instantiation.
- SkSTArray<2, sk_sp<GrTexture>> fRecyclableAtlasTextures;
-
// Used in MSAA mode to make an intermediate draw that resolves stencil winding values to coverage.
GrTAutoMapVertexBuffer<GrStencilAtlasOp::ResolveRectInstance> fStencilResolveBuffer;
int fNextStencilResolveInstanceIdx = 0;
@@ -178,12 +93,9 @@
public:
#ifdef SK_DEBUG
void debugOnly_didReuseRenderedPath() {
- if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
- --fEndStencilResolveInstance;
- }
+ --fEndStencilResolveInstance;
}
#endif
- const GrTexture* testingOnly_frontCopyAtlasTexture() const;
const GrTexture* testingOnly_frontRenderedAtlasTexture() const;
};
diff --git a/src/gpu/ccpr/GrCCPerOpsTaskPaths.h b/src/gpu/ccpr/GrCCPerOpsTaskPaths.h
index ff8a224..4692dc9 100644
--- a/src/gpu/ccpr/GrCCPerOpsTaskPaths.h
+++ b/src/gpu/ccpr/GrCCPerOpsTaskPaths.h
@@ -15,7 +15,6 @@
#include <map>
-class GrCCDrawPathsOp;
class GrCCPerFlushResources;
/**
@@ -23,7 +22,6 @@
*/
// DDL TODO: given the usage pattern in DDL mode, this could probably be non-atomic refcounting.
struct GrCCPerOpsTaskPaths : public SkRefCnt {
- SkTInternalLList<GrCCDrawPathsOp> fDrawOps; // This class does not own these ops.
std::map<uint32_t, GrCCClipPath> fClipPaths;
SkSTArenaAlloc<10 * 1024> fAllocator{10 * 1024 * 2};
sk_sp<const GrCCPerFlushResources> fFlushResources;
diff --git a/src/gpu/ccpr/GrCCSTLList.h b/src/gpu/ccpr/GrCCSTLList.h
deleted file mode 100644
index 29f26b9..0000000
--- a/src/gpu/ccpr/GrCCSTLList.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2018 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrCCSTLList_DEFINED
-#define GrCCSTLList_DEFINED
-
-#include "include/private/SkNoncopyable.h"
-#include "src/core/SkArenaAlloc.h"
-#include <new>
-
-/**
- * A singly-linked list whose head element is a local class member. This is required by
- * GrCCDrawPathsOp because the owning opsTask is unknown at the time of creation, so we can't use
- * its associated allocator to create the first element.
- */
-template<typename T> class GrCCSTLList : SkNoncopyable {
-public:
- template <typename ...Args>
- GrCCSTLList(Args&&... args) : fHead(std::forward<Args>(args)...) {}
-
- ~GrCCSTLList() {
- T* draw = fHead.fNext; // fHead will be destructed automatically.
- while (draw) {
- T* next = draw->fNext;
- draw->~T();
- draw = next;
- }
- }
-
- const T& head() const { return fHead; }
- T& head() { return fHead; }
-
- void append(GrCCSTLList&& right, SkArenaAlloc* alloc) {
- T* nextTail = (&right.fHead == right.fTail) ? nullptr : right.fTail;
- T* newRightHead =
- new (alloc->makeBytesAlignedTo(sizeof(T), alignof(T))) T(std::move(right.fHead));
-
- // Finish the move of right.fHead.
- right.fHead.fNext = nullptr;
- right.fTail = &right.fHead;
-
- fTail->fNext = newRightHead;
- fTail = !nextTail ? newRightHead : nextTail;
- }
-
- template<typename U> struct Iter {
- bool operator!=(const Iter& that) { return fCurr != that.fCurr; }
- U& operator*() { return *fCurr; }
- void operator++() { fCurr = fCurr->fNext; }
- U* fCurr;
- };
- Iter<const T> begin() const { return Iter<const T>{&fHead}; }
- Iter<const T> end() const { return Iter<const T>{nullptr}; }
- Iter<T> begin() { return Iter<T>{&fHead}; }
- Iter<T> end() { return Iter<T>{nullptr}; }
-
-private:
- T fHead;
- T* fTail = &fHead;
-};
-
-#endif
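
For reference (not part of this CL): a minimal usage sketch of the deleted list. The node type is hypothetical; GrCCSTLList<T> only requires that T expose an fNext pointer and be move-constructible.

#include "src/core/SkArenaAlloc.h"

struct Node {
    explicit Node(int v) : fValue(v) {}
    Node(Node&&) = default;
    int fValue;
    Node* fNext = nullptr;
};

// SkSTArenaAlloc<1024> alloc;
// GrCCSTLList<Node> a(1);          // the head element lives inside the list
// GrCCSTLList<Node> b(2);
// a.append(std::move(b), &alloc);  // b's head is re-created in the arena
// for (const Node& n : a) {}       // visits 1, then 2
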
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index dbcc1d9..d587f50 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -6,18 +6,15 @@
*/
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
+#include <memory>
#include "include/pathops/SkPathOps.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrSurfaceDrawContext.h"
#include "src/gpu/ccpr/GrCCClipProcessor.h"
-#include "src/gpu/ccpr/GrCCDrawPathsOp.h"
-#include "src/gpu/ccpr/GrCCPathCache.h"
-using PathInstance = GrCCPathProcessor::Instance;
-
-bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps, CoverageType* coverageType) {
+bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
const GrShaderCaps& shaderCaps = *caps.shaderCaps();
GrBackendFormat defaultA8Format = caps.getDefaultBackendFormat(GrColorType::kAlpha_8,
GrRenderable::kYes);
@@ -32,33 +29,20 @@
caps.internalMultisampleCount(defaultA8Format) > 1 &&
caps.sampleLocationsSupport() &&
shaderCaps.sampleMaskSupport()) {
- if (coverageType) {
- *coverageType = CoverageType::kA8_Multisample;
- }
return true;
}
return false;
}
-sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
- const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
- CoverageType coverageType;
- if (IsSupported(caps, &coverageType)) {
- return sk_sp<GrCoverageCountingPathRenderer>(new GrCoverageCountingPathRenderer(
- coverageType, allowCaching, contextUniqueID));
+std::unique_ptr<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
+ const GrCaps& caps) {
+ if (IsSupported(caps)) {
+ return std::make_unique<GrCoverageCountingPathRenderer>();
}
return nullptr;
}
-GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(
- CoverageType coverageType, AllowCaching allowCaching, uint32_t contextUniqueID)
- : fCoverageType(coverageType) {
- if (AllowCaching::kYes == allowCaching) {
- fPathCache = std::make_unique<GrCCPathCache>(contextUniqueID);
- }
-}
-
GrCCPerOpsTaskPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opsTaskID) {
auto it = fPendingPaths.find(opsTaskID);
if (fPendingPaths.end() == it) {
@@ -68,92 +52,6 @@
return it->second.get();
}
-GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
- const CanDrawPathArgs& args) const {
-#if 1
- // The atlas takes up too much memory. We should focus on other path renderers instead.
- return CanDrawPath::kNo;
-#else
- const GrStyledShape& shape = *args.fShape;
- // We use "kCoverage", or analytic AA, no matter what the coverage type of our atlas: Even if the
- // atlas is multisampled, that resolves into analytic coverage before we draw the path to the
- // main canvas.
- if (GrAAType::kCoverage != args.fAAType || shape.style().hasPathEffect() ||
- args.fViewMatrix->hasPerspective() || shape.inverseFilled()) {
- return CanDrawPath::kNo;
- }
-
- SkPath path;
- shape.asPath(&path);
-
- const SkStrokeRec& stroke = shape.style().strokeRec();
- switch (stroke.getStyle()) {
- case SkStrokeRec::kFill_Style: {
- SkRect devBounds;
- args.fViewMatrix->mapRect(&devBounds, path.getBounds());
-
- SkIRect clippedIBounds;
- devBounds.roundOut(&clippedIBounds);
- if (!clippedIBounds.intersect(*args.fClipConservativeBounds)) {
- // The path is completely clipped away. Our code will eventually notice this before
- // doing any real work.
- return CanDrawPath::kYes;
- }
-
- int64_t numPixels = sk_64_mul(clippedIBounds.height(), clippedIBounds.width());
- if (path.countVerbs() > 1000 && path.countPoints() > numPixels) {
- // This is a complicated path that has more vertices than pixels! Let's let the SW
- // renderer have this one: It will probably be faster and a bitmap will require less
- // total memory on the GPU than CCPR instance buffers would for the raw path data.
- return CanDrawPath::kNo;
- }
-
- if (numPixels > 256 * 256) {
- // Large paths can blow up the atlas fast. And they are not ideal for a two-pass
- // rendering algorithm. Give the simpler direct renderers a chance before we commit
- // to drawing it.
- return CanDrawPath::kAsBackup;
- }
-
- if (args.fShape->hasUnstyledKey() && path.countVerbs() > 50) {
- // Complex paths do better cached in an SDF, if the renderer will accept them.
- return CanDrawPath::kAsBackup;
- }
-
- return CanDrawPath::kYes;
- }
-
- case SkStrokeRec::kStroke_Style:
- case SkStrokeRec::kHairline_Style:
- case SkStrokeRec::kStrokeAndFill_Style:
- return CanDrawPath::kNo;
- }
-
- SK_ABORT("Invalid stroke style.");
-#endif
-}
-
-bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
- SkASSERT(!fFlushing);
-
- auto op = GrCCDrawPathsOp::Make(args.fContext, *args.fClipConservativeBounds, *args.fViewMatrix,
- *args.fShape, std::move(args.fPaint));
- this->recordOp(std::move(op), args);
- return true;
-}
-
-void GrCoverageCountingPathRenderer::recordOp(GrOp::Owner op,
- const DrawPathArgs& args) {
- if (op) {
- auto addToOwningPerOpsTaskPaths = [this](GrOp* op, uint32_t opsTaskID) {
- op->cast<GrCCDrawPathsOp>()->addToOwningPerOpsTaskPaths(
- sk_ref_sp(this->lookupPendingPaths(opsTaskID)));
- };
- args.fRenderTargetContext->addDrawOp(args.fClip, std::move(op),
- addToOwningPerOpsTaskPaths);
- }
-}
-
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
std::unique_ptr<GrFragmentProcessor> inputFP, uint32_t opsTaskID,
const SkPath& deviceSpacePath, const SkIRect& accessRect, const GrCaps& caps) {
@@ -168,11 +66,7 @@
#endif
uint32_t key = deviceSpacePath.getGenerationID();
- if (CoverageType::kA8_Multisample == fCoverageType) {
- // We only need to consider fill rule in MSAA mode. In coverage count mode Even/Odd and
- // Nonzero both reference the same coverage count mask.
- key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
- }
+ key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
GrCCClipPath& clipPath =
this->lookupPendingPaths(opsTaskID)->fClipPaths[key];
if (!clipPath.isInitialized()) {
@@ -183,9 +77,9 @@
SkPath croppedPath;
int maxRTSize = caps.maxRenderTargetSize();
CropPath(deviceSpacePath, SkIRect::MakeWH(maxRTSize, maxRTSize), &croppedPath);
- clipPath.init(croppedPath, accessRect, fCoverageType, caps);
+ clipPath.init(croppedPath, accessRect, caps);
} else {
- clipPath.init(deviceSpacePath, accessRect, fCoverageType, caps);
+ clipPath.init(deviceSpacePath, accessRect, caps);
}
} else {
clipPath.addAccess(accessRect);
@@ -199,23 +93,16 @@
void GrCoverageCountingPathRenderer::preFlush(
GrOnFlushResourceProvider* onFlushRP, SkSpan<const uint32_t> taskIDs) {
- using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
SkASSERT(!fFlushing);
SkASSERT(fFlushingPaths.empty());
SkDEBUGCODE(fFlushing = true);
- if (fPathCache) {
- fPathCache->doPreFlushProcessing();
- }
-
if (fPendingPaths.empty()) {
return; // Nothing to draw.
}
GrCCPerFlushResourceSpecs specs;
int maxPreferredRTSize = onFlushRP->caps()->maxPreferredRenderTargetSize();
- specs.fCopyAtlasSpecs.fMaxPreferredTextureSize = std::min(2048, maxPreferredRTSize);
- SkASSERT(0 == specs.fCopyAtlasSpecs.fMinTextureSize);
specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
specs.fRenderedAtlasSpecs.fMinTextureSize = std::min(512, maxPreferredRTSize);
@@ -231,9 +118,6 @@
fFlushingPaths.push_back(std::move(iter->second));
fPendingPaths.erase(iter);
- for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
- op->accountForOwnPaths(fPathCache.get(), onFlushRP, &specs);
- }
for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
clipsIter.second.accountForOwnPath(&specs);
}
@@ -243,36 +127,18 @@
return; // Nothing to draw.
}
- // Determine if there are enough reusable paths from last flush for it to be worth our time to
- // copy them to cached atlas(es).
- int numCopies = specs.fNumCopiedPaths;
- auto doCopies = DoCopiesToA8Coverage(numCopies > kDoCopiesThreshold ||
- specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
- if (numCopies && DoCopiesToA8Coverage::kNo == doCopies) {
- specs.cancelCopies();
- }
-
- auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, fCoverageType, specs);
+ auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, specs);
if (!resources->isMapped()) {
return; // Some allocation failed.
}
// Lay out the atlas(es) and parse paths.
for (const auto& flushingPaths : fFlushingPaths) {
- for (GrCCDrawPathsOp* op : flushingPaths->fDrawOps) {
- op->setupResources(fPathCache.get(), onFlushRP, resources.get(), doCopies);
- }
for (auto& clipsIter : flushingPaths->fClipPaths) {
clipsIter.second.renderPathInAtlas(resources.get(), onFlushRP);
}
}
- if (fPathCache) {
- // Purge invalidated textures from previous atlases *before* calling finalize(). That way,
- // the underlying textures objects can be freed up and reused for the next atlases.
- fPathCache->purgeInvalidatedAtlasTextures(onFlushRP);
- }
-
// Allocate resources and then render the atlas(es).
if (!resources->finalize(onFlushRP)) {
return;
@@ -303,13 +169,6 @@
SkDEBUGCODE(fFlushing = false);
}
-void GrCoverageCountingPathRenderer::purgeCacheEntriesOlderThan(
- GrProxyProvider* proxyProvider, const GrStdSteadyClock::time_point& purgeTime) {
- if (fPathCache) {
- fPathCache->purgeEntriesOlderThan(proxyProvider, purgeTime);
- }
-}
-
void GrCoverageCountingPathRenderer::CropPath(const SkPath& path, const SkIRect& cropbox,
SkPath* out) {
SkPath cropboxPath;
@@ -320,24 +179,3 @@
}
out->setIsVolatile(true);
}
-
-float GrCoverageCountingPathRenderer::GetStrokeDevWidth(const SkMatrix& m,
- const SkStrokeRec& stroke,
- float* inflationRadius) {
- float strokeDevWidth;
- if (stroke.isHairlineStyle()) {
- strokeDevWidth = 1;
- } else {
- SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle());
- SkASSERT(m.isSimilarity()); // Otherwise matrixScaleFactor = m.getMaxScale().
- float matrixScaleFactor = SkVector::Length(m.getScaleX(), m.getSkewY());
- strokeDevWidth = stroke.getWidth() * matrixScaleFactor;
- }
- if (inflationRadius) {
- // Inflate for a minimum stroke width of 1. In some cases when the stroke is less than 1px
- // wide, we may inflate it to 1px and instead reduce the opacity.
- *inflationRadius = SkStrokeRec::GetInflationRadius(
- stroke.getJoin(), stroke.getMiter(), stroke.getCap(), std::max(strokeDevWidth, 1.f));
- }
- return strokeDevWidth;
-}
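
For reference, a self-contained sketch of the math the deleted GetStrokeDevWidth()
implemented (a hypothetical free function, not Skia API; like the original, it
assumes a similarity matrix):

    #include <cmath>

    // Device-space width of a stroke under a similarity matrix. The uniform
    // scale factor of such a matrix is the length of its first column,
    // sqrt(scaleX^2 + skewY^2) -- the same quantity the deleted code took
    // from SkVector::Length(m.getScaleX(), m.getSkewY()).
    static float stroke_dev_width(float scaleX, float skewY, float strokeWidth,
                                  bool hairline) {
        if (hairline) {
            return 1;  // Hairlines always rasterize 1px wide in device space.
        }
        float matrixScaleFactor = std::sqrt(scaleX*scaleX + skewY*skewY);
        return strokeWidth * matrixScaleFactor;
    }

    // e.g. stroke_dev_width(2, 0, 3, false) == 6: a 3px stroke under a
    // uniform 2x scale covers 6 device pixels.

The deleted helper additionally derived an inflation radius from
max(strokeDevWidth, 1) to honor the 1px-minimum behavior noted in its comment.
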
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index b9f34c8..09d06e0 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -15,9 +15,6 @@
#include "src/gpu/ccpr/GrCCPerFlushResources.h"
#include "src/gpu/ccpr/GrCCPerOpsTaskPaths.h"
-class GrCCDrawPathsOp;
-class GrCCPathCache;
-
/**
* This is a path renderer that draws antialiased paths by counting coverage in an offscreen
* buffer. (See GrCCCoverageProcessor, GrCCPathProcessor.)
@@ -25,23 +22,11 @@
* It also serves as the per-render-target tracker for pending path draws, and at the start of
* flush, it compiles GPU buffers and renders a "coverage count atlas" for the upcoming paths.
*/
-class GrCoverageCountingPathRenderer : public GrPathRenderer, public GrOnFlushCallbackObject {
+class GrCoverageCountingPathRenderer : public GrOnFlushCallbackObject {
public:
- using CoverageType = GrCCAtlas::CoverageType;
+ static bool IsSupported(const GrCaps&);
- const char* name() const final { return "CCPR"; }
-
- static bool IsSupported(const GrCaps&, CoverageType* = nullptr);
-
- enum class AllowCaching : bool {
- kNo = false,
- kYes = true
- };
-
- static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(
- const GrCaps&, AllowCaching, uint32_t contextUniqueID);
-
- CoverageType coverageType() const { return fCoverageType; }
+ static std::unique_ptr<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&);
using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpsTaskPaths>>;
@@ -74,8 +59,6 @@
void preFlush(GrOnFlushResourceProvider*, SkSpan<const uint32_t> taskIDs) override;
void postFlush(GrDeferredUploadToken, SkSpan<const uint32_t> taskIDs) override;
- void purgeCacheEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point&);
-
// If a path spans more pixels than this, we need to crop it or else analytic AA can run out of
// fp32 precision.
static constexpr float kPathCropThreshold = 1 << 16;
@@ -88,23 +71,8 @@
static constexpr int kDoCopiesThreshold = 100;
- static float GetStrokeDevWidth(const SkMatrix&, const SkStrokeRec&,
- float* inflationRadius = nullptr);
-
private:
- GrCoverageCountingPathRenderer(CoverageType, AllowCaching, uint32_t contextUniqueID);
-
- // GrPathRenderer overrides.
- StencilSupport onGetStencilSupport(const GrStyledShape&) const override {
- return GrPathRenderer::kNoSupport_StencilSupport;
- }
- CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
- bool onDrawPath(const DrawPathArgs&) override;
-
GrCCPerOpsTaskPaths* lookupPendingPaths(uint32_t opsTaskID);
- void recordOp(GrOp::Owner, const DrawPathArgs&);
-
- const CoverageType fCoverageType;
// fPendingPaths holds the GrCCPerOpsTaskPaths objects that have already been created, but not
// flushed, and those that are still being created. All GrCCPerOpsTaskPaths objects will first
@@ -115,14 +83,10 @@
// (It will only contain elements when fFlushing is true.)
SkSTArray<4, sk_sp<GrCCPerOpsTaskPaths>> fFlushingPaths;
- std::unique_ptr<GrCCPathCache> fPathCache;
-
SkDEBUGCODE(bool fFlushing = false);
public:
- void testingOnly_drawPathDirectly(const DrawPathArgs&);
const GrCCPerFlushResources* testingOnly_getCurrentFlushResources();
- const GrCCPathCache* testingOnly_getPathCache() const;
};
#endif
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
index 72d443c..e07ebec 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
@@ -5,14 +5,15 @@
* found in the LICENSE file.
*/
+#include <memory>
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
-bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps, CoverageType*) {
+bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
return false;
}
-sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
- const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
+std::unique_ptr<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
+ const GrCaps&) {
return nullptr;
}
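
A minimal caller sketch for the new factory signature (illustrative only; the
caps object and the registration step are assumed context, not part of this CL):

    // CreateIfSupported() now returns std::unique_ptr, so the context takes
    // exclusive ownership of the renderer instead of sharing it via sk_sp.
    std::unique_ptr<GrCoverageCountingPathRenderer> ccpr =
            GrCoverageCountingPathRenderer::CreateIfSupported(caps);
    if (ccpr) {
        // As a GrOnFlushCallbackObject, the renderer's preFlush()/postFlush()
        // overrides run around each flush to build and draw the clip atlas.
    }
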
diff --git a/src/gpu/ccpr/GrOctoBounds.cpp b/src/gpu/ccpr/GrOctoBounds.cpp
deleted file mode 100644
index cc6f97e..0000000
--- a/src/gpu/ccpr/GrOctoBounds.cpp
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright 2019 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "include/private/SkTPin.h"
-#include "src/gpu/ccpr/GrOctoBounds.h"
-#include <algorithm>
-
-bool GrOctoBounds::clip(const SkIRect& clipRect) {
- // Intersect dev bounds with the clip rect.
- float l = std::max(fBounds.left(), (float)clipRect.left());
- float t = std::max(fBounds.top(), (float)clipRect.top());
- float r = std::min(fBounds.right(), (float)clipRect.right());
- float b = std::min(fBounds.bottom(), (float)clipRect.bottom());
-
- float l45 = fBounds45.left();
- float t45 = fBounds45.top();
- float r45 = fBounds45.right();
- float b45 = fBounds45.bottom();
-
- // Check if either the bounds or 45-degree bounds are empty. We write this check as the NOT of
- // non-empty rects, so we will return false if any values are NaN.
- if (!(l < r && t < b && l45 < r45 && t45 < b45)) {
- return false;
- }
-
-    // Tighten the dev bounds around the new (octagonal) intersection that results after clipping.
-    // Depending on the diagonals, these may now be even tighter than the clipped bounds. Shader
-    // code that emits octagons expects both bounding boxes to circumscribe the inner octagon, and
-    // will fail if they do not.
- if (l45 > Get_x45(r,b)) {
- // Slide the bottom upward until it crosses the l45 diagonal at x=r.
- // y = x + (y0 - x0)
- // Substitute: l45 = x0 - y0
- // y = x - l45
- b = SkTPin(r - l45, t, b);
- } else if (r45 < Get_x45(r,b)) {
- // Slide the right side leftward until it crosses the r45 diagonal at y=b.
- // x = y + (x0 - y0)
- // Substitute: r45 = x0 - y0
- // x = y + r45
- r = SkTPin(b + r45, l, r);
- }
- if (l45 > Get_x45(l,t)) {
- // Slide the left side rightward until it crosses the l45 diagonal at y=t.
- // x = y + (x0 - y0)
- // Substitute: l45 = x0 - y0
- // x = y + l45
- l = SkTPin(t + l45, l, r);
- } else if (r45 < Get_x45(l,t)) {
- // Slide the top downward until it crosses the r45 diagonal at x=l.
- // y = x + (y0 - x0)
- // Substitute: r45 = x0 - y0
- // y = x - r45
- t = SkTPin(l - r45, t, b);
- }
- if (t45 > Get_y45(l,b)) {
- // Slide the left side rightward until it crosses the t45 diagonal at y=b.
- // x = -y + (x0 + y0)
- // Substitute: t45 = x0 + y0
- // x = -y + t45
- l = SkTPin(t45 - b, l, r);
- } else if (b45 < Get_y45(l,b)) {
- // Slide the bottom upward until it crosses the b45 diagonal at x=l.
- // y = -x + (y0 + x0)
- // Substitute: b45 = x0 + y0
- // y = -x + b45
- b = SkTPin(b45 - l, t, b);
- }
- if (t45 > Get_y45(r,t)) {
- // Slide the top downward until it crosses the t45 diagonal at x=r.
- // y = -x + (y0 + x0)
- // Substitute: t45 = x0 + y0
- // y = -x + t45
- t = SkTPin(t45 - r, t, b);
- } else if (b45 < Get_y45(r,t)) {
- // Slide the right side leftward until it crosses the b45 diagonal at y=t.
- // x = -y + (x0 + y0)
- // Substitute: b45 = x0 + y0
- // x = -y + b45
- r = SkTPin(b45 - t, l, r);
- }
-
- // Tighten the 45-degree bounding box. Since the dev bounds are now fully tightened, we only
- // have to clamp the diagonals to outer corners.
- // NOTE: This will not cause l,t,r,b to need more insetting. We only ever change a diagonal by
- // pinning it to a FAR corner, which, by definition, is still outside the other corners.
- l45 = SkTPin(Get_x45(l,b), l45, r45);
- t45 = SkTPin(Get_y45(l,t), t45, b45);
- r45 = SkTPin(Get_x45(r,t), l45, r45);
- b45 = SkTPin(Get_y45(r,b), t45, b45);
-
- // Make one final check for empty or NaN bounds. If the dev bounds were clipped completely
- // outside one of the diagonals, they will have been pinned to empty. It's also possible that
- // some Infs crept in and turned into NaNs.
- if (!(l < r && t < b && l45 < r45 && t45 < b45)) {
- return false;
- }
-
- fBounds.setLTRB(l, t, r, b);
- fBounds45.setLTRB(l45, t45, r45, b45);
-
-#ifdef SK_DEBUG
- // Verify dev bounds are inside the clip rect.
- SkASSERT(l >= (float)clipRect.left());
- SkASSERT(t >= (float)clipRect.top());
- SkASSERT(r <= (float)clipRect.right());
- SkASSERT(b <= (float)clipRect.bottom());
- this->validateBoundsAreTight();
-#endif
-
- return true;
-}
-
-#if defined(SK_DEBUG) || GR_TEST_UTILS
-void GrOctoBounds::validateBoundsAreTight() const {
- this->validateBoundsAreTight([](bool cond, const char* file, int line, const char* code) {
- SkASSERTF(cond, "%s(%d): assertion failure: \"assert(%s)\"", file, line, code);
- });
-}
-
-void GrOctoBounds::validateBoundsAreTight(const std::function<void(
- bool cond, const char* file, int line, const char* code)>& validateFn) const {
- // The octobounds calculated in GrCCPerFlushResources::renderShapeInAtlas use FMAs to compute
- // M * (x,y) and T45 * M * (x,y) in parallel. This leads to a not-insignificant floating point
- // difference between (T45 * M * (x,y)) stored in fBounds45, and T45 * (M * (x,y)) calculated
- // here from fBounds with the Get_xy45 functions.
- constexpr static float epsilon = 1e-2f;
-
- float l=fBounds.left(), l45=fBounds45.left();
- float t=fBounds.top(), t45=fBounds45.top();
- float r=fBounds.right(), r45=fBounds45.right();
- float b=fBounds.bottom(), b45=fBounds45.bottom();
-
-#define VALIDATE(CODE) validateFn(CODE, __FILE__, __LINE__, #CODE)
- // Verify diagonals are inside far corners of the dev bounds.
- VALIDATE(l45 >= Get_x45(l,b) - epsilon);
- VALIDATE(t45 >= Get_y45(l,t) - epsilon);
- VALIDATE(r45 <= Get_x45(r,t) + epsilon);
- VALIDATE(b45 <= Get_y45(r,b) + epsilon);
- // Verify verticals and horizontals are inside far corners of the 45-degree dev bounds.
- VALIDATE(l >= Get_x(l45,t45) - epsilon);
- VALIDATE(t >= Get_y(r45,t45) - epsilon);
- VALIDATE(r <= Get_x(r45,b45) + epsilon);
- VALIDATE(b <= Get_y(l45,b45) + epsilon);
- // Verify diagonals are outside middle corners of the dev bounds.
- VALIDATE(l45 <= Get_x45(r,b) + epsilon);
- VALIDATE(l45 <= Get_x45(l,t) + epsilon);
- VALIDATE(t45 <= Get_y45(l,b) + epsilon);
- VALIDATE(t45 <= Get_y45(r,t) + epsilon);
- VALIDATE(r45 >= Get_x45(l,t) - epsilon);
- VALIDATE(r45 >= Get_x45(r,b) - epsilon);
- VALIDATE(b45 >= Get_y45(r,t) - epsilon);
- VALIDATE(b45 >= Get_y45(l,b) - epsilon);
- // Verify verticals and horizontals are outside middle corners of the 45-degree dev bounds.
- VALIDATE(l <= Get_x(l45,b45) + epsilon);
- VALIDATE(l <= Get_x(r45,t45) + epsilon);
- VALIDATE(t <= Get_y(r45,b45) + epsilon);
- VALIDATE(t <= Get_y(l45,t45) + epsilon);
- VALIDATE(r >= Get_x(r45,t45) - epsilon);
- VALIDATE(r >= Get_x(l45,b45) - epsilon);
- VALIDATE(b >= Get_y(l45,t45) - epsilon);
- VALIDATE(b >= Get_y(r45,b45) - epsilon);
-#undef VALIDATE
-}
-#endif
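
Spelling out the first branch of the deleted clip() routine, which slides the
bottom edge up to the l45 diagonal: from Get_x45, the diagonal of constant l45
is the line

\[
x - y = \ell_{45} \;\Longrightarrow\; y = x - \ell_{45},
\]

so evaluating at x = r and pinning to the current vertical extent gives

\[
b' = \mathrm{pin}(r - \ell_{45},\, t,\, b),
\]

which is exactly the "b = SkTPin(r - l45, t, b)" line above. The other seven
branches are the same derivation with the edges and diagonals permuted.
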
diff --git a/src/gpu/ccpr/GrOctoBounds.h b/src/gpu/ccpr/GrOctoBounds.h
deleted file mode 100644
index 5ef2c11..0000000
--- a/src/gpu/ccpr/GrOctoBounds.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright 2019 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrOctoBounds_DEFINED
-#define GrOctoBounds_DEFINED
-
-#include "include/core/SkRect.h"
-#include <functional>
-
-/**
- * This class is composed of two bounding boxes: one in device space, and one in a 45-degree rotated
- * space.
- *
- * The 45-degree bounding box resides in "| 1 -1 | * coords" space.
- * | 1 1 |
- *
- * The intersection of these two boxes defines the bounding octagon of a shape.
- *
- * Furthermore, both bounding boxes are fully tightened. This means we can blindly find the
- * intersections between each diagonal and its vertical and horizontal neighbors, and be left with
- * 8 points that define a convex (possibly degenerate) octagon.
- */
-class GrOctoBounds {
-public:
- GrOctoBounds() = default;
- GrOctoBounds(const SkRect& bounds, const SkRect& bounds45) {
- this->set(bounds, bounds45);
- }
-
- void set(const SkRect& bounds, const SkRect& bounds45) {
- fBounds = bounds;
- fBounds45 = bounds45;
- SkDEBUGCODE(this->validateBoundsAreTight());
- }
-
- bool operator==(const GrOctoBounds& that) const {
- return fBounds == that.fBounds && fBounds45 == that.fBounds45;
- }
- bool operator!=(const GrOctoBounds& that) const { return !(*this == that); }
-
- const SkRect& bounds() const { return fBounds; }
- float left() const { return fBounds.left(); }
- float top() const { return fBounds.top(); }
- float right() const { return fBounds.right(); }
- float bottom() const { return fBounds.bottom(); }
-
-
- // The 45-degree bounding box resides in "| 1 -1 | * coords" space.
- // | 1 1 |
- const SkRect& bounds45() const { return fBounds45; }
- float left45() const { return fBounds45.left(); }
- float top45() const { return fBounds45.top(); }
- float right45() const { return fBounds45.right(); }
- float bottom45() const { return fBounds45.bottom(); }
-
- void roundOut(SkIRect* out) const {
- // The octagon is the intersection of fBounds and fBounds45 (see the comment at the start of
- // the class). The octagon's bounding box is therefore just fBounds. And the integer
- // bounding box can be found by simply rounding out fBounds.
- fBounds.roundOut(out);
- }
-
- GrOctoBounds makeOffset(float dx, float dy) const {
- GrOctoBounds offset;
- offset.setOffset(*this, dx, dy);
- return offset;
- }
-
- void setOffset(const GrOctoBounds& octoBounds, float dx, float dy) {
- fBounds = octoBounds.fBounds.makeOffset(dx, dy);
- fBounds45 = octoBounds.fBounds45.makeOffset(dx - dy, dx + dy);
- SkDEBUGCODE(this->validateBoundsAreTight());
- }
-
- void outset(float radius) {
- fBounds.outset(radius, radius);
- fBounds45.outset(radius*SK_ScalarSqrt2, radius*SK_ScalarSqrt2);
- SkDEBUGCODE(this->validateBoundsAreTight());
- }
-
- // Clips the octo bounds by a clip rect and ensures the resulting bounds are fully tightened.
- // Returns false if the octagon and clipRect do not intersect at all.
- //
-    // NOTE: Does not perform a trivial containment test before the clip routine. It is probably a
-    // good idea not to call this method if 'this->bounds()' is fully contained within 'clipRect'.
- bool SK_WARN_UNUSED_RESULT clip(const SkIRect& clipRect);
-
- // The 45-degree bounding box resides in "| 1 -1 | * coords" space.
- // | 1 1 |
- //
- // i.e., | x45 | = | x - y |
- // | y45 | = | x + y |
- //
- // These methods transform points between device space and 45-degree space.
- constexpr static float Get_x45(float x, float y) { return x - y; }
- constexpr static float Get_y45(float x, float y) { return x + y; }
- constexpr static float Get_x(float x45, float y45) { return (x45 + y45) * .5f; }
- constexpr static float Get_y(float x45, float y45) { return (y45 - x45) * .5f; }
-
-#if defined(SK_DEBUG) || GR_TEST_UTILS
- void validateBoundsAreTight() const;
- void validateBoundsAreTight(const std::function<void(
- bool cond, const char* file, int line, const char* code)>& validateFn) const;
-#endif
-
-private:
- SkRect fBounds;
- SkRect fBounds45;
-};
-
-#endif
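
The transform pair at the bottom of the deleted header inverts as follows:

\[
x_{45} = x - y,\qquad y_{45} = x + y
\;\Longrightarrow\;
x = \tfrac{1}{2}(x_{45} + y_{45}),\qquad y = \tfrac{1}{2}(y_{45} - x_{45}),
\]

matching Get_x() and Get_y(). Since (x, y) -> (x - y, x + y) is a 45-degree
rotation composed with a uniform scale of sqrt(2), a device-space outset of
radius r corresponds to an outset of r*sqrt(2) in the rotated space, which is
what outset() applies to fBounds45.
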