ccpr: Don't use the GrContext id for the path cache id
Currently this is probably safe, but somebody could easily make a
change in the future that adds another path cache to the context and
accidentally introduces tricky bugs.
TBR=brianosman@google.com
Bug: chromium:897510
Bug: chromium:897413
Bug: chromium:897245
Bug: chromium:897507
Change-Id: I6bc40ac671058f78eb290dd775612d99008d32e7
Reviewed-on: https://skia-review.googlesource.com/c/164700
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
diff --git a/src/gpu/GrPathRendererChain.cpp b/src/gpu/GrPathRendererChain.cpp
index 93741da..60c935a 100644
--- a/src/gpu/GrPathRendererChain.cpp
+++ b/src/gpu/GrPathRendererChain.cpp
@@ -37,8 +37,7 @@
if (options.fGpuPathRenderers & GpuPathRenderers::kCoverageCounting) {
using AllowCaching = GrCoverageCountingPathRenderer::AllowCaching;
if (auto ccpr = GrCoverageCountingPathRenderer::CreateIfSupported(
- caps, AllowCaching(options.fAllowPathMaskCaching),
- context->uniqueID())) {
+ caps, AllowCaching(options.fAllowPathMaskCaching))) {
fCoverageCountingPathRenderer = ccpr.get();
context->contextPriv().addOnFlushCallbackObject(fCoverageCountingPathRenderer);
fChain.push_back(std::move(ccpr));
diff --git a/src/gpu/ccpr/GrCCAtlas.cpp b/src/gpu/ccpr/GrCCAtlas.cpp
index 4ef632a..a566707 100644
--- a/src/gpu/ccpr/GrCCAtlas.cpp
+++ b/src/gpu/ccpr/GrCCAtlas.cpp
@@ -166,10 +166,11 @@
return fUniqueKey;
}
-sk_sp<GrCCAtlas::CachedAtlasInfo> GrCCAtlas::refOrMakeCachedAtlasInfo() {
+sk_sp<GrCCAtlas::CachedAtlasInfo> GrCCAtlas::refOrMakeCachedAtlasInfo(uint32_t contextUniqueID) {
if (!fCachedAtlasInfo) {
- fCachedAtlasInfo = sk_make_sp<CachedAtlasInfo>();
+ fCachedAtlasInfo = sk_make_sp<CachedAtlasInfo>(contextUniqueID);
}
+ SkASSERT(fCachedAtlasInfo->fContextUniqueID == contextUniqueID);
return fCachedAtlasInfo;
}
diff --git a/src/gpu/ccpr/GrCCAtlas.h b/src/gpu/ccpr/GrCCAtlas.h
index 6412895..4a762bc 100644
--- a/src/gpu/ccpr/GrCCAtlas.h
+++ b/src/gpu/ccpr/GrCCAtlas.h
@@ -74,11 +74,13 @@
// potentially be reused (i.e., those which still represent an extant path). When the percentage
// of useful pixels drops below 50%, the entire texture is purged from the resource cache.
struct CachedAtlasInfo : public GrNonAtomicRef<CachedAtlasInfo> {
+ CachedAtlasInfo(uint32_t contextUniqueID) : fContextUniqueID(contextUniqueID) {}
+ const uint32_t fContextUniqueID;
int fNumPathPixels = 0;
int fNumInvalidatedPathPixels = 0;
bool fIsPurgedFromResourceCache = false;
};
- sk_sp<CachedAtlasInfo> refOrMakeCachedAtlasInfo();
+ sk_sp<CachedAtlasInfo> refOrMakeCachedAtlasInfo(uint32_t contextUniqueID);
// Instantiates our texture proxy for the atlas and returns a pre-cleared GrRenderTargetContext
// that the caller may use to render the content. After this call, it is no longer valid to call
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 1dae085..4b3780d 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -345,8 +345,9 @@
SkIVector newOffset;
GrCCAtlas* atlas =
resources->copyPathToCachedAtlas(*cacheEntry, doEvenOddFill, &newOffset);
- cacheEntry->updateToCachedAtlas(atlas->getOrAssignUniqueKey(onFlushRP), newOffset,
- atlas->refOrMakeCachedAtlasInfo());
+ cacheEntry->updateToCachedAtlas(
+ atlas->getOrAssignUniqueKey(onFlushRP), newOffset,
+ atlas->refOrMakeCachedAtlasInfo(onFlushRP->contextUniqueID()));
this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
resources->appendDrawPathInstance().set(*cacheEntry, draw.fCachedMaskShift,
draw.fColor);
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index db85641..6d37b4f 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -12,6 +12,16 @@
DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCacheEntry>);
+static inline uint32_t next_path_cache_id() {
+ static std::atomic<uint32_t> gNextID(1);
+ for (;;) {
+ uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
+ if (SK_InvalidUniqueID != id) {
+ return id;
+ }
+ }
+}
+
static inline bool SkShouldPostMessageToBus(
const sk_sp<GrCCPathCacheEntry>& entry, uint32_t msgBusUniqueID) {
return entry->pathCacheUniqueID() == msgBusUniqueID;
@@ -20,6 +30,7 @@
// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;
+
GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
: fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
SkASSERT(!m.hasPerspective());
@@ -128,6 +139,11 @@
return GrResourceKeyHash(&key.fData[1], key.fData[0]);
}
+
+GrCCPathCache::GrCCPathCache()
+ : fInvalidatedEntriesInbox(next_path_cache_id()) {
+}
+
#ifdef SK_DEBUG
GrCCPathCache::~GrCCPathCache() {
// Ensure the hash table and LRU list are still coherent.
@@ -248,9 +264,7 @@
fCachedAtlasInfo->fNumInvalidatedPathPixels >= fCachedAtlasInfo->fNumPathPixels / 2) {
// Too many invalidated pixels: purge the atlas texture from the resource cache.
-// The GrContext and CCPR path cache both share the same unique ID.
-            uint32_t contextUniqueID = fPathCacheUniqueID;
SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
-            GrUniqueKeyInvalidatedMessage(fAtlasKey, contextUniqueID));
+            GrUniqueKeyInvalidatedMessage(fAtlasKey, fCachedAtlasInfo->fContextUniqueID));
fCachedAtlasInfo->fIsPurgedFromResourceCache = true;
}
}
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index e8ce928..54a835a 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -24,7 +24,7 @@
*/
class GrCCPathCache {
public:
- GrCCPathCache(uint32_t contextUniqueID) : fInvalidatedEntriesInbox(contextUniqueID) {}
+ GrCCPathCache();
SkDEBUGCODE(~GrCCPathCache();)
// Stores the components of a transformation that affect a path mask (i.e. everything but
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 50eb1eb..55065e6 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -30,16 +30,15 @@
}
sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
- const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
+ const GrCaps& caps, AllowCaching allowCaching) {
return sk_sp<GrCoverageCountingPathRenderer>((IsSupported(caps))
- ? new GrCoverageCountingPathRenderer(allowCaching, contextUniqueID)
+ ? new GrCoverageCountingPathRenderer(allowCaching)
: nullptr);
}
-GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching,
- uint32_t contextUniqueID) {
+GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching) {
if (AllowCaching::kYes == allowCaching) {
- fPathCache = skstd::make_unique<GrCCPathCache>(contextUniqueID);
+ fPathCache = skstd::make_unique<GrCCPathCache>();
}
}
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 1457e9b..19e42a9 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -34,8 +34,7 @@
kYes = true
};
- static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching,
- uint32_t contextUniqueID);
+ static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching);
using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
@@ -83,7 +82,7 @@
float* inflationRadius = nullptr);
private:
- GrCoverageCountingPathRenderer(AllowCaching, uint32_t contextUniqueID);
+ GrCoverageCountingPathRenderer(AllowCaching);
// GrPathRenderer overrides.
StencilSupport onGetStencilSupport(const GrShape&) const override {
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
index 5de32aa..ebc12da 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
@@ -12,7 +12,7 @@
}
sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
- const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
+ const GrCaps& caps, AllowCaching allowCaching) {
return nullptr;
}