Revert "ccpr: Support caching of paths that span multiple tiles"
This reverts commit 6a3dc8be46728ce2042d363f89ed689a2236a37a.
Reason for revert: <INSERT REASONING HERE>
Original change's description:
> ccpr: Support caching of paths that span multiple tiles
>
> Adds an accumulative "hit rect" for each cache entry that tracks the
> region of the path that has been drawn during its lifetime. Now, a
> path mask can be cached once the "hit rect" covers 50% of the path.
> This allows us to cache a path that spans multiple tiles.
>
> To guard against unnecessarily caching gigantic path masks, we also
> require that 10% of the path be visible during the draw when it is
> cached.
>
> Bug: skia:8462
> Change-Id: Iab2c277102b7a774eaa909c9663211694554c5a5
> Reviewed-on: https://skia-review.googlesource.com/c/180700
> Commit-Queue: Chris Dalton <csmartdalton@google.com>
> Reviewed-by: Robert Phillips <robertphillips@google.com>
TBR=bsalomon@google.com,robertphillips@google.com,brianosman@google.com,csmartdalton@google.com
Change-Id: Ibae9d46333e3178856fd623f26317366102dd344
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: skia:8462
Reviewed-on: https://skia-review.googlesource.com/c/181982
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
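For context on what is being reverted: the caching criteria described in the original change above boil down to a pair of area comparisons. Below is a minimal, standalone sketch of that heuristic, not the actual Skia implementation; the Rect struct and the free-standing shouldCachePathMask() are simplified stand-ins for SkIRect and GrCCDrawPathsOp::SingleDraw::shouldCachePathMask(), and the hit-count and max-render-target-size checks from the real code are omitted here (they appear in the deleted lines of the diff below).

    // Minimal sketch of the reverted caching heuristic. "Rect" is a simplified
    // stand-in for SkIRect; the real code additionally requires hitCount() > 1
    // and that the shape fits within the max render target size.
    #include <cstdint>

    struct Rect {
        int left, top, right, bottom;
        int width() const { return right - left; }
        int height() const { return bottom - top; }
    };

    static int64_t area(const Rect& r) {
        return int64_t(r.width()) * int64_t(r.height());
    }

    // shapeBounds: conservative device-space bounds of the entire path.
    // hitRect:     accumulated region of the path drawn over the cache entry's lifetime.
    // drawBounds:  clipped device-space bounds of the current draw.
    static bool shouldCachePathMask(const Rect& shapeBounds, const Rect& hitRect,
                                    const Rect& drawBounds) {
        int64_t shapeArea = area(shapeBounds);
        if (shapeArea < 100 * 100) {
            // Small paths: render and cache the whole mask regardless of visibility.
            return true;
        }
        // Cache once >= 50% of the path has been seen across past draws, and
        // >= 10% of it is visible in this particular draw.
        return area(hitRect) * 2 >= shapeArea && area(drawBounds) * 10 >= shapeArea;
    }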
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 254d062..b9f0969 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -25,6 +25,10 @@
return false;
}
+static int64_t area(const SkIRect& r) {
+ return sk_64_mul(r.height(), r.width());
+}
+
std::unique_ptr<GrCCDrawPathsOp> GrCCDrawPathsOp::Make(
GrContext* context, const SkIRect& clipIBounds, const SkMatrix& m, const GrShape& shape,
GrPaint&& paint) {
@@ -87,22 +91,36 @@
conservativeDevBounds.roundOut(&shapeConservativeIBounds);
SkIRect maskDevIBounds;
- if (!maskDevIBounds.intersect(clipIBounds, shapeConservativeIBounds)) {
- return nullptr;
+ Visibility maskVisibility;
+ if (clipIBounds.contains(shapeConservativeIBounds)) {
+ maskDevIBounds = shapeConservativeIBounds;
+ maskVisibility = Visibility::kComplete;
+ } else {
+ if (!maskDevIBounds.intersect(clipIBounds, shapeConservativeIBounds)) {
+ return nullptr;
+ }
+ int64_t unclippedArea = area(shapeConservativeIBounds);
+ int64_t clippedArea = area(maskDevIBounds);
+ maskVisibility = (clippedArea >= unclippedArea/2 || unclippedArea < 100*100)
+ ? Visibility::kMostlyComplete // i.e., visible enough to justify rendering the
+ // whole thing if we think we can cache it.
+ : Visibility::kPartial;
}
GrOpMemoryPool* pool = context->contextPriv().opMemoryPool();
+
return pool->allocate<GrCCDrawPathsOp>(m, shape, strokeDevWidth, shapeConservativeIBounds,
- maskDevIBounds, conservativeDevBounds, std::move(paint));
+ maskDevIBounds, maskVisibility, conservativeDevBounds,
+ std::move(paint));
}
GrCCDrawPathsOp::GrCCDrawPathsOp(const SkMatrix& m, const GrShape& shape, float strokeDevWidth,
const SkIRect& shapeConservativeIBounds,
- const SkIRect& maskDevIBounds, const SkRect& conservativeDevBounds,
- GrPaint&& paint)
+ const SkIRect& maskDevIBounds, Visibility maskVisibility,
+ const SkRect& conservativeDevBounds, GrPaint&& paint)
: GrDrawOp(ClassID())
, fViewMatrixIfUsingLocalCoords(has_coord_transforms(paint) ? m : SkMatrix::I())
- , fDraws(m, shape, strokeDevWidth, shapeConservativeIBounds, maskDevIBounds,
+ , fDraws(m, shape, strokeDevWidth, shapeConservativeIBounds, maskDevIBounds, maskVisibility,
paint.getColor4f())
, fProcessors(std::move(paint)) { // Paint must be moved after fetching its color above.
SkDEBUGCODE(fBaseInstance = -1);
@@ -121,12 +139,14 @@
GrCCDrawPathsOp::SingleDraw::SingleDraw(const SkMatrix& m, const GrShape& shape,
float strokeDevWidth,
const SkIRect& shapeConservativeIBounds,
- const SkIRect& maskDevIBounds, const SkPMColor4f& color)
+ const SkIRect& maskDevIBounds, Visibility maskVisibility,
+ const SkPMColor4f& color)
: fMatrix(m)
, fShape(shape)
, fStrokeDevWidth(strokeDevWidth)
, fShapeConservativeIBounds(shapeConservativeIBounds)
, fMaskDevIBounds(maskDevIBounds)
+ , fMaskVisibility(maskVisibility)
, fColor(color) {
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
if (fShape.hasUnstyledKey()) {
@@ -219,6 +239,8 @@
void GrCCDrawPathsOp::SingleDraw::accountForOwnPath(
GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP,
GrCCPerFlushResourceSpecs* specs) {
+ using CreateIfAbsent = GrCCPathCache::CreateIfAbsent;
+ using MaskTransform = GrCCPathCache::MaskTransform;
using CoverageType = GrCCAtlas::CoverageType;
SkPath path;
@@ -227,8 +249,9 @@
SkASSERT(!fCacheEntry);
if (pathCache) {
- fCacheEntry =
- pathCache->find(onFlushRP, fShape, fMaskDevIBounds, fMatrix, &fCachedMaskShift);
+ MaskTransform m(fMatrix, &fCachedMaskShift);
+ bool canStashPathMask = fMaskVisibility >= Visibility::kMostlyComplete;
+ fCacheEntry = pathCache->find(onFlushRP, fShape, m, CreateIfAbsent(canStashPathMask));
}
if (fCacheEntry) {
@@ -250,52 +273,25 @@
return;
}
- if (this->shouldCachePathMask(onFlushRP->caps()->maxRenderTargetSize())) {
- fDoCachePathMask = true;
- // We don't cache partial masks; ensure the bounds include the entire path.
- fMaskDevIBounds = fShapeConservativeIBounds;
+ if (Visibility::kMostlyComplete == fMaskVisibility && fCacheEntry->hitCount() > 1) {
+ int shapeSize = SkTMax(fShapeConservativeIBounds.height(),
+ fShapeConservativeIBounds.width());
+ if (shapeSize <= onFlushRP->caps()->maxRenderTargetSize()) {
+ // We've seen this path before with a compatible matrix, and it's mostly
+ // visible. Just render the whole mask so we can try to cache it.
+ fMaskDevIBounds = fShapeConservativeIBounds;
+ fMaskVisibility = Visibility::kComplete;
+ }
}
}
- // Plan on rendering this path in a new atlas.
int idx = (fShape.style().strokeRec().isFillStyle())
? GrCCPerFlushResourceSpecs::kFillIdx
: GrCCPerFlushResourceSpecs::kStrokeIdx;
++specs->fNumRenderedPaths[idx];
specs->fRenderedPathStats[idx].statPath(path);
- specs->fRenderedAtlasSpecs.accountForSpace(fMaskDevIBounds.width(), fMaskDevIBounds.height());
-}
-
-bool GrCCDrawPathsOp::SingleDraw::shouldCachePathMask(int maxRenderTargetSize) const {
- SkASSERT(fCacheEntry);
- SkASSERT(!fCacheEntry->cachedAtlas());
- if (fCacheEntry->hitCount() <= 1) {
- return false; // Don't cache a path mask until at least its second hit.
- }
-
- int shapeMaxDimension = SkTMax(fShapeConservativeIBounds.height(),
- fShapeConservativeIBounds.width());
- if (shapeMaxDimension > maxRenderTargetSize) {
- return false; // This path isn't cachable.
- }
-
- int64_t shapeArea = sk_64_mul(fShapeConservativeIBounds.height(),
- fShapeConservativeIBounds.width());
- if (shapeArea < 100*100) {
- // If a path is small enough, we might as well try to render and cache the entire thing, no
- // matter how much of it is actually visible.
- return true;
- }
-
- // Render and cache the entire path mask if we see enough of it to justify rendering all the
- // pixels. Our criteria for "enough" is that we must have seen at least 50% of the path in the
- // past, and in this particular draw we must see at least 10% of it.
- const SkIRect& hitRect = fCacheEntry->hitRect();
- SkASSERT(fShapeConservativeIBounds.makeOffset(-fCachedMaskShift.x(),
- -fCachedMaskShift.y()).contains(hitRect));
- int64_t hitArea = sk_64_mul(hitRect.height(), hitRect.width());
- int64_t drawArea = sk_64_mul(fMaskDevIBounds.height(), fMaskDevIBounds.width());
- return hitArea*2 >= shapeArea && drawArea*10 >= shapeArea;
+ specs->fRenderedAtlasSpecs.accountForSpace(fMaskDevIBounds.width(),
+ fMaskDevIBounds.height());
}
void GrCCDrawPathsOp::setupResources(
@@ -339,12 +335,6 @@
== fCacheEntry->cachedAtlas()->coverageType());
SkASSERT(fCacheEntry->cachedAtlas()->getOnFlushProxy());
}
-#if 0
- // Simple color manipulation to visualize cached paths.
- fColor = (GrCCAtlas::CoverageType::kA8_LiteralCoverage
- == fCacheEntry->cachedAtlas()->coverageType())
- ? SkPMColor4f{0,0,.25,.25} : SkPMColor4f{0,.25,0,.25};
-#endif
op->recordInstance(fCacheEntry->cachedAtlas()->getOnFlushProxy(),
resources->nextPathInstanceIdx());
// TODO4F: Preserve float colors
@@ -369,10 +359,19 @@
resources->appendDrawPathInstance().set(devBounds, devBounds45, devToAtlasOffset,
fColor.toBytes_RGBA(), doEvenOddFill);
- if (fDoCachePathMask) {
- SkASSERT(fCacheEntry);
+ // If we have a spot in the path cache, try to make a note of where this mask is so we
+ // can reuse it in the future.
+ if (fCacheEntry) {
SkASSERT(!fCacheEntry->cachedAtlas());
- SkASSERT(fShapeConservativeIBounds == fMaskDevIBounds);
+
+ if (Visibility::kComplete != fMaskVisibility || fCacheEntry->hitCount() <= 1) {
+ // Don't cache a path mask unless it's completely visible with a hit count > 1.
+ //
+ // NOTE: mostly-visible paths with a hit count > 1 should have been promoted to
+ // fully visible during accountForOwnPaths().
+ return;
+ }
+
fCacheEntry->setCoverageCountAtlas(onFlushRP, atlas, devToAtlasOffset, devBounds,
devBounds45, devIBounds, fCachedMaskShift);
}
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
index 82e05ae..b5d216f 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -73,10 +73,15 @@
float strokeDevWidth,
const SkRect& conservativeDevBounds,
GrPaint&&);
+ enum class Visibility {
+ kPartial,
+ kMostlyComplete, // (i.e., can we cache the whole path mask if we think it will be reused?)
+ kComplete
+ };
GrCCDrawPathsOp(const SkMatrix&, const GrShape&, float strokeDevWidth,
const SkIRect& shapeConservativeIBounds, const SkIRect& maskDevIBounds,
- const SkRect& conservativeDevBounds, GrPaint&&);
+ Visibility maskVisibility, const SkRect& conservativeDevBounds, GrPaint&&);
void recordInstance(GrTextureProxy* atlasProxy, int instanceIdx);
@@ -86,7 +91,7 @@
public:
SingleDraw(const SkMatrix&, const GrShape&, float strokeDevWidth,
const SkIRect& shapeConservativeIBounds, const SkIRect& maskDevIBounds,
- const SkPMColor4f&);
+ Visibility maskVisibility, const SkPMColor4f&);
// See the corresponding methods in GrCCDrawPathsOp.
RequiresDstTexture finalize(const GrCaps&, const GrAppliedClip*, GrProcessorSet*);
@@ -96,19 +101,17 @@
DoCopiesToA8Coverage, GrCCDrawPathsOp*);
private:
- bool shouldCachePathMask(int maxRenderTargetSize) const;
-
SkMatrix fMatrix;
GrShape fShape;
float fStrokeDevWidth;
const SkIRect fShapeConservativeIBounds;
SkIRect fMaskDevIBounds;
+ Visibility fMaskVisibility;
SkPMColor4f fColor;
GrCCPathCache::OnFlushEntryRef fCacheEntry;
SkIVector fCachedMaskShift;
bool fDoCopyToA8Coverage = false;
- bool fDoCachePathMask = false;
SingleDraw* fNext = nullptr;
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index 1342431..c3f6498 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -158,9 +158,9 @@
}
-GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(
- GrOnFlushResourceProvider* onFlushRP, const GrShape& shape,
- const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, SkIVector* maskShift) {
+GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(GrOnFlushResourceProvider* onFlushRP,
+ const GrShape& shape, const MaskTransform& m,
+ CreateIfAbsent createIfAbsent) {
if (!shape.hasUnstyledKey()) {
return OnFlushEntryRef();
}
@@ -174,7 +174,6 @@
fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
writeKeyHelper.write(shape, fScratchKey->data());
- MaskTransform m(viewMatrix, maskShift);
GrCCPathCacheEntry* entry = nullptr;
if (HashNode* node = fHashTable.find(*fScratchKey)) {
entry = node->entry();
@@ -182,12 +181,11 @@
if (!fuzzy_equals(m, entry->fMaskTransform)) {
// The path was reused with an incompatible matrix.
- if (entry->unique()) {
+ if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
// This entry is unique: recycle it instead of deleting and malloc-ing a new one.
SkASSERT(0 == entry->fOnFlushRefCnt); // Because we are unique.
entry->fMaskTransform = m;
entry->fHitCount = 0;
- entry->fHitRect = SkIRect::MakeEmpty();
entry->releaseCachedAtlas(this);
} else {
this->evict(*fScratchKey);
@@ -197,6 +195,9 @@
}
if (!entry) {
+ if (CreateIfAbsent::kNo == createIfAbsent) {
+ return OnFlushEntryRef();
+ }
if (fHashTable.count() >= kMaxCacheCount) {
SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
SkASSERT(node && node->entry() == fLRU.tail());
@@ -240,7 +241,6 @@
}
}
}
- entry->fHitRect.join(clippedDrawBounds.makeOffset(-maskShift->x(), -maskShift->y()));
SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
return OnFlushEntryRef::OnFlushRef(entry);
}
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index 5fa5d4d..8c52bf9 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -99,15 +99,15 @@
GrCCPathCacheEntry* fEntry = nullptr;
};
- // Finds an entry in the cache that matches the given shape and transformation matrix.
- // 'maskShift' is filled with an integer post-translate that the caller must apply when drawing
- // the entry's mask to the device.
- //
- // NOTE: Shapes are only given one entry, so any time they are accessed with a new
- // transformation, the old entry gets evicted.
- OnFlushEntryRef find(GrOnFlushResourceProvider*, const GrShape&,
- const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix,
- SkIVector* maskShift);
+ enum class CreateIfAbsent : bool {
+ kNo = false,
+ kYes = true
+ };
+
+ // Finds an entry in the cache. Shapes are only given one entry, so any time they are accessed
+ // with a different MaskTransform, the old entry gets evicted.
+ OnFlushEntryRef find(GrOnFlushResourceProvider*, const GrShape&, const MaskTransform&,
+ CreateIfAbsent = CreateIfAbsent::kNo);
void doPreFlushProcessing();
@@ -204,17 +204,14 @@
const GrCCPathCache::Key& cacheKey() const { SkASSERT(fCacheKey); return *fCacheKey; }
- // The number of flushes during which this specific entry (path + matrix combination) has been
- // pulled from the path cache. If a path is pulled from the cache more than once in a single
- // flush, the hit count is only incremented once.
+ // The number of times this specific entry (path + matrix combination) has been pulled from
+ // the path cache. As long as the caller does exactly one lookup per draw, this translates to
+ // the number of times the path has been drawn with a compatible matrix.
//
- // If the entry did not previously exist, its hit count will be 1.
+ // If the entry did not previously exist and was created during
+ // GrCCPathCache::find(.., CreateIfAbsent::kYes), its hit count will be 1.
int hitCount() const { return fHitCount; }
- // The accumulative region of the path that has been drawn during the lifetime of this cache
- // entry (as defined by the 'clippedDrawBounds' parameter for GrCCPathCache::find).
- const SkIRect& hitRect() const { return fHitRect; }
-
const GrCCCachedAtlas* cachedAtlas() const { return fCachedAtlas.get(); }
const SkIRect& devIBounds() const { return fDevIBounds; }
@@ -254,7 +251,6 @@
sk_sp<GrCCPathCache::Key> fCacheKey;
GrStdSteadyClock::time_point fTimestamp;
int fHitCount = 0;
- SkIRect fHitRect = SkIRect::MakeEmpty();
sk_sp<GrCCCachedAtlas> fCachedAtlas;
SkIVector fAtlasOffset;