ccpr: Don't use cache entries as path listeners
Allocates a separate "Key" object that holds the key and listens on the
path. This eliminates threading concerns and automatically fixes the
recycling logic.
Bug: skia:8452
Change-Id: I9b446ddf1d5da7bc44a8c1c3ff4c4efc0fdc97b1
Reviewed-on: https://skia-review.googlesource.com/c/166100
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index 839cba4..4816858 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -10,7 +10,9 @@
#include "GrShape.h"
#include "SkNx.h"
-DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCacheEntry>);
+static constexpr int kMaxKeyDataCountU32 = 256; // 1kB of uint32_t's.
+
+DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);
static inline uint32_t next_path_cache_id() {
static std::atomic<uint32_t> gNextID(1);
@@ -23,8 +25,8 @@
}
static inline bool SkShouldPostMessageToBus(
- const sk_sp<GrCCPathCacheEntry>& entry, uint32_t msgBusUniqueID) {
- return entry->pathCacheUniqueID() == msgBusUniqueID;
+ const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
+ return key->pathCacheUniqueID() == msgBusUniqueID;
}
// The maximum number of cache entries we allow in our own cache.
@@ -62,29 +64,105 @@
return true;
}
+sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
+ int dataCountU32, const void* data) {
+ void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
+ sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
+ if (data) {
+ memcpy(key->data(), data, key->dataSizeInBytes());
+ }
+ return key;
+}
+
+const uint32_t* GrCCPathCache::Key::data() const {
+    // The shape key is a variable-length footer to the Key allocation.
+ return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
+}
+
+uint32_t* GrCCPathCache::Key::data() {
+    // The shape key is a variable-length footer to the Key allocation.
+ return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
+}
+
+inline bool GrCCPathCache::Key::operator==(const GrCCPathCache::Key& that) const {
+ return fDataSizeInBytes == that.fDataSizeInBytes &&
+ !memcmp(this->data(), that.data(), fDataSizeInBytes);
+}
+
+void GrCCPathCache::Key::onChange() {
+ // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
+ SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
+}
+
+inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
+ const GrCCPathCache::HashNode& node) {
+ return *node.entry()->fCacheKey;
+}
+
+inline uint32_t GrCCPathCache::HashNode::Hash(const Key& key) {
+ return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
+}
+
+inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
+ const MaskTransform& m, const GrShape& shape)
+ : fPathCache(pathCache)
+ , fEntry(new GrCCPathCacheEntry(key, m)) {
+ SkASSERT(shape.hasUnstyledKey());
+ shape.addGenIDChangeListener(std::move(key));
+}
+
+inline GrCCPathCache::HashNode::~HashNode() {
+ this->willExitHashTable();
+}
+
+inline GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
+ this->willExitHashTable();
+ fPathCache = node.fPathCache;
+ fEntry = std::move(node.fEntry);
+ SkASSERT(!node.fEntry);
+ return *this;
+}
+
+inline void GrCCPathCache::HashNode::willExitHashTable() {
+ if (!fEntry) {
+ return; // We were moved.
+ }
+
+ SkASSERT(fPathCache);
+ SkASSERT(fPathCache->fLRU.isInList(fEntry.get()));
+
+ fEntry->fCacheKey->markShouldUnregisterFromPath(); // Unregister the path listener.
+ fPathCache->fLRU.remove(fEntry.get());
+}
+
+
+GrCCPathCache::GrCCPathCache()
+ : fInvalidatedKeysInbox(next_path_cache_id())
+ , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
+}
+
+GrCCPathCache::~GrCCPathCache() {
+ fHashTable.reset(); // Must be cleared first; ~HashNode calls fLRU.remove() on us.
+ SkASSERT(fLRU.isEmpty()); // Ensure the hash table and LRU list were coherent.
+}
+
namespace {
// Produces a key that accounts both for a shape's path geometry, as well as any stroke/style.
-class WriteStyledKey {
+class WriteKeyHelper {
public:
- static constexpr int kStyledKeySizeInBytesIdx = 0;
- static constexpr int kStrokeWidthIdx = 1;
- static constexpr int kStrokeMiterIdx = 2;
- static constexpr int kStrokeCapJoinIdx = 3;
- static constexpr int kShapeUnstyledKeyIdx = 4;
+ static constexpr int kStrokeWidthIdx = 0;
+ static constexpr int kStrokeMiterIdx = 1;
+ static constexpr int kStrokeCapJoinIdx = 2;
+ static constexpr int kShapeUnstyledKeyIdx = 3;
- static constexpr int kStrokeKeyCount = 3; // [width, miterLimit, cap|join].
-
- WriteStyledKey(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}
+ WriteKeyHelper(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}
// Returns the total number of uint32_t's to allocate for the key.
int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }
- // Writes the key to out[].
+ // Writes the key data to out[].
void write(const GrShape& shape, uint32_t* out) {
- out[kStyledKeySizeInBytesIdx] =
- (kStrokeKeyCount + fShapeUnstyledKeyCount) * sizeof(uint32_t);
-
// Stroke key.
// We don't use GrStyle::WriteKey() because it does not account for hairlines.
// http://skbug.com/8273
@@ -112,94 +190,23 @@
}
-inline bool operator==(const GrCCPathCache::HashKey& key1, const GrCCPathCache::HashKey& key2) {
- return key1.fData[0] == key2.fData[0] && !memcmp(&key1.fData[1], &key2.fData[1], key1.fData[0]);
-}
-
-inline GrCCPathCache::HashKey GrCCPathCache::HashNode::GetKey(const GrCCPathCacheEntry* entry) {
- // The shape key is a variable-length footer to the entry allocation.
- return HashKey{(const uint32_t*)((const char*)entry + sizeof(GrCCPathCacheEntry))};
-}
-
-inline uint32_t GrCCPathCache::HashNode::Hash(HashKey key) {
- return GrResourceKeyHash(&key.fData[1], key.fData[0]);
-}
-
-inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, const MaskTransform& m,
- const GrShape& shape)
- : fPathCache(pathCache) {
- SkASSERT(SkGetThreadID() == fPathCache->fGraphicsThreadID);
- SkASSERT(shape.hasUnstyledKey());
-
- WriteStyledKey writeKey(shape);
- void* mem = ::operator new (sizeof(GrCCPathCacheEntry) +
- writeKey.allocCountU32() * sizeof(uint32_t));
- fEntry.reset(new (mem) GrCCPathCacheEntry(fPathCache->fInvalidatedEntriesInbox.uniqueID(), m));
-
- // The shape key is a variable-length footer to the entry allocation.
- uint32_t* keyData = (uint32_t*)((char*)mem + sizeof(GrCCPathCacheEntry));
- writeKey.write(shape, keyData);
-}
-
-inline GrCCPathCache::HashNode::~HashNode() {
- this->willExitHashTable();
-}
-
-inline GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
- this->willExitHashTable();
- fPathCache = node.fPathCache;
- fEntry = std::move(node.fEntry);
- SkASSERT(!node.fEntry);
- return *this;
-}
-
-inline void GrCCPathCache::HashNode::willExitHashTable() {
- if (!fEntry) {
- return; // We were moved.
- }
-
- SkASSERT(fPathCache);
- SkASSERT(SkGetThreadID() == fPathCache->fGraphicsThreadID);
- SkASSERT(fPathCache->fLRU.isInList(fEntry.get()));
-
- fEntry->invalidateAtlas(); // Must happen on graphics thread.
- fEntry->markShouldUnregisterFromPath();
- fPathCache->fLRU.remove(fEntry.get());
-}
-
-
-GrCCPathCache::GrCCPathCache()
- : fInvalidatedEntriesInbox(next_path_cache_id()) {
-}
-
-GrCCPathCache::~GrCCPathCache() {
- // DDL threads never use the cache, in which case it doesn't matter what thread we
- // clean up on.
- SkASSERT(kIllegalThreadID == fGraphicsThreadID || SkGetThreadID() == fGraphicsThreadID);
-
- fHashTable.reset(); // Must be cleared first; ~HashNode calls fLRU.remove() on us.
- SkASSERT(fLRU.isEmpty()); // Ensure the hash table and LRU list were coherent.
-}
-
sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
CreateIfAbsent createIfAbsent) {
-#ifdef SK_DEBUG
- if (kIllegalThreadID == fGraphicsThreadID) {
- fGraphicsThreadID = SkGetThreadID();
- }
-#endif
- SkASSERT(SkGetThreadID() == fGraphicsThreadID);
-
if (!shape.hasUnstyledKey()) {
return nullptr;
}
- WriteStyledKey writeKey(shape);
- SkAutoSTMalloc<GrShape::kMaxKeyFromDataVerbCnt * 4, uint32_t> keyData(writeKey.allocCountU32());
- writeKey.write(shape, keyData.get());
+ WriteKeyHelper writeKeyHelper(shape);
+ if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
+ return nullptr;
+ }
+
+ SkASSERT(fScratchKey->unique());
+ fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
+ writeKeyHelper.write(shape, fScratchKey->data());
GrCCPathCacheEntry* entry = nullptr;
- if (HashNode* node = fHashTable.find({keyData.get()})) {
+ if (HashNode* node = fHashTable.find(*fScratchKey)) {
entry = node->entry();
SkASSERT(fLRU.isInList(entry));
if (fuzzy_equals(m, entry->fMaskTransform)) {
@@ -211,7 +218,7 @@
entry->invalidateAtlas();
SkASSERT(!entry->fCurrFlushAtlas); // Should be null because 'entry' is unique.
} else {
- this->evict(entry);
+ this->evict(*fScratchKey);
entry = nullptr;
}
}
@@ -221,62 +228,45 @@
return nullptr;
}
if (fHashTable.count() >= kMaxCacheCount) {
- this->evict(fLRU.tail()); // We've exceeded our limit.
+ SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
+ SkASSERT(node && node->entry() == fLRU.tail());
+ this->evict(*fLRU.tail()->fCacheKey); // We've exceeded our limit.
}
- SkASSERT(!fHashTable.find({keyData.get()}));
- entry = fHashTable.set(HashNode(this, m, shape))->entry();
- shape.addGenIDChangeListener(sk_ref_sp(entry));
+
+ // Create a new entry in the cache.
+ sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
+ writeKeyHelper.allocCountU32(), fScratchKey->data());
+ SkASSERT(*permanentKey == *fScratchKey);
+ SkASSERT(!fHashTable.find(*permanentKey));
+ entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();
+
SkASSERT(fHashTable.count() <= kMaxCacheCount);
} else {
fLRU.remove(entry); // Will be re-added at head.
}
- SkDEBUGCODE(HashNode* node = fHashTable.find(HashNode::GetKey(entry)));
+ SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
SkASSERT(node && node->entry() == entry);
fLRU.addToHead(entry);
return sk_ref_sp(entry);
}
-void GrCCPathCache::evict(GrCCPathCacheEntry* entry) {
- // Has the entry already been evicted? (We mark "shouldUnregisterFromPath" upon eviction.)
- bool isInCache = !entry->shouldUnregisterFromPath();
- SkDEBUGCODE(HashNode* entryKeyNode = fHashTable.find(HashNode::GetKey(entry)));
- SkASSERT((entryKeyNode && entryKeyNode->entry() == entry) == isInCache);
- SkASSERT(fLRU.isInList(entry) == isInCache);
-
- if (isInCache) {
- fHashTable.remove(HashNode::GetKey(entry));
- // HashNode::willExitHashTable() takes care of the rest.
- }
-}
-
void GrCCPathCache::purgeAsNeeded() {
- SkTArray<sk_sp<GrCCPathCacheEntry>> invalidatedEntries;
- fInvalidatedEntriesInbox.poll(&invalidatedEntries);
- for (const sk_sp<GrCCPathCacheEntry>& entry : invalidatedEntries) {
- this->evict(entry.get());
+ SkTArray<sk_sp<Key>> invalidatedKeys;
+ fInvalidatedKeysInbox.poll(&invalidatedKeys);
+ for (const sk_sp<Key>& key : invalidatedKeys) {
+ bool isInCache = !key->shouldUnregisterFromPath(); // Gets set upon exiting the cache.
+ if (isInCache) {
+ this->evict(*key);
+ }
}
}
-GrCCPathCacheEntry::GrCCPathCacheEntry(uint32_t pathCacheUniqueID,
- const MaskTransform& maskTransform)
- : fPathCacheUniqueID(pathCacheUniqueID), fMaskTransform(maskTransform) {
- SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
- SkDEBUGCODE(fGraphicsThreadID = SkGetThreadID());
-}
-
-GrCCPathCacheEntry::~GrCCPathCacheEntry() {
- // This might get called from a different thread.
- SkASSERT(!fCachedAtlasInfo); // Should have been cleared when the entry was evicted.
- SkASSERT(!fCurrFlushAtlas); // Client is required to reset fCurrFlushAtlas back to null.
-}
-
void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
const SkIVector& atlasOffset, const SkRect& devBounds,
const SkRect& devBounds45, const SkIRect& devIBounds,
const SkIVector& maskShift) {
- SkASSERT(SkGetThreadID() == fGraphicsThreadID);
SkASSERT(atlasKey.isValid());
SkASSERT(!fCurrFlushAtlas); // Otherwise we should reuse the atlas from last time.
@@ -293,7 +283,6 @@
void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
const SkIVector& newAtlasOffset,
sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
- SkASSERT(SkGetThreadID() == fGraphicsThreadID);
SkASSERT(atlasKey.isValid());
SkASSERT(!fCurrFlushAtlas); // Otherwise we should reuse the atlas from last time.
@@ -306,8 +295,6 @@
}
void GrCCPathCacheEntry::invalidateAtlas() {
- SkASSERT(SkGetThreadID() == fGraphicsThreadID);
-
if (fCachedAtlasInfo) {
// Mark our own pixels invalid in the cached atlas texture.
fCachedAtlasInfo->fNumInvalidatedPathPixels += this->height() * this->width();
@@ -324,8 +311,3 @@
fAtlasKey.reset();
fCachedAtlasInfo = nullptr;
}
-
-void GrCCPathCacheEntry::onChange() {
- // Post a thread-safe eviction message.
- SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Post(sk_ref_sp(this));
-}
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index c0e3cef..16f31d0 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -27,6 +27,42 @@
GrCCPathCache();
~GrCCPathCache();
+ class Key : public SkPathRef::GenIDChangeListener {
+ public:
+ static sk_sp<Key> Make(uint32_t pathCacheUniqueID, int dataCountU32,
+ const void* data = nullptr);
+
+ uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; }
+
+ int dataSizeInBytes() const { return fDataSizeInBytes; }
+ const uint32_t* data() const;
+
+ void resetDataCountU32(int dataCountU32) {
+ SkASSERT(dataCountU32 <= fDataReserveCountU32);
+ fDataSizeInBytes = dataCountU32 * sizeof(uint32_t);
+ }
+ uint32_t* data();
+
+ bool operator==(const Key&) const;
+
+ // Called when our corresponding path is modified or deleted. Not threadsafe.
+ void onChange() override;
+
+ private:
+ Key(uint32_t pathCacheUniqueID, int dataCountU32)
+ : fPathCacheUniqueID(pathCacheUniqueID)
+ , fDataSizeInBytes(dataCountU32 * sizeof(uint32_t))
+ SkDEBUGCODE(, fDataReserveCountU32(dataCountU32)) {
+ SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
+ }
+
+ const uint32_t fPathCacheUniqueID;
+ int fDataSizeInBytes;
+ SkDEBUGCODE(const int fDataReserveCountU32);
+        // The GrShape's unstyled key is stored as a variable-length footer to this class.
+        // data() provides access to it.
+ };
+
// Stores the components of a transformation that affect a path mask (i.e. everything but
// integer translation). During construction, any integer portions of the matrix's translate are
// shaved off and returned to the caller. The caller is responsible for those integer shifts.
@@ -50,29 +86,20 @@
sk_sp<GrCCPathCacheEntry> find(const GrShape&, const MaskTransform&,
CreateIfAbsent = CreateIfAbsent::kNo);
- void evict(GrCCPathCacheEntry*);
-
void purgeAsNeeded();
private:
- // Wrapper around a raw GrShape key that has a specialized operator==. Used by the hash table.
- struct HashKey {
- const uint32_t* fData;
- };
- friend bool operator==(const HashKey&, const HashKey&);
-
// This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
// methods for SkTHash, and can only be moved. This guarantees the hash table holds exactly one
// reference for each entry. Also, when a HashNode goes out of scope, that means it is exiting
// the hash table. We take that opportunity to remove it from the LRU list and do some cleanup.
class HashNode : SkNoncopyable {
public:
- static HashKey GetKey(const HashNode& node) { return GetKey(node.entry()); }
- static HashKey GetKey(const GrCCPathCacheEntry*);
- static uint32_t Hash(HashKey);
+ static const Key& GetKey(const HashNode&);
+ static uint32_t Hash(const Key&);
HashNode() = default;
- HashNode(GrCCPathCache*, const MaskTransform&, const GrShape&);
+ HashNode(GrCCPathCache*, sk_sp<Key>, const MaskTransform&, const GrShape&);
HashNode(HashNode&& node)
: fPathCache(node.fPathCache), fEntry(std::move(node.fEntry)) {
SkASSERT(!node.fEntry);
@@ -89,28 +116,30 @@
GrCCPathCache* fPathCache = nullptr;
sk_sp<GrCCPathCacheEntry> fEntry;
- // The GrShape's unstyled key is stored as a variable-length footer to the 'fEntry'
- // allocation. GetKey provides access to it.
};
- SkTHashTable<HashNode, HashKey> fHashTable;
- SkTInternalLList<GrCCPathCacheEntry> fLRU;
- SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Inbox fInvalidatedEntriesInbox;
+ void evict(const GrCCPathCache::Key& key) {
+ fHashTable.remove(key); // HashNode::willExitHashTable() takes care of the rest.
+ }
- SkDEBUGCODE(SkThreadID fGraphicsThreadID = kIllegalThreadID);
+ SkTHashTable<HashNode, const GrCCPathCache::Key&> fHashTable;
+ SkTInternalLList<GrCCPathCacheEntry> fLRU;
+ SkMessageBus<sk_sp<Key>>::Inbox fInvalidatedKeysInbox;
+ sk_sp<Key> fScratchKey; // Reused for creating a temporary key in the find() method.
};
/**
* This class stores all the data necessary to draw a specific path + matrix combination from their
* corresponding cached atlas.
*/
-class GrCCPathCacheEntry : public SkPathRef::GenIDChangeListener {
+class GrCCPathCacheEntry : public GrNonAtomicRef<GrCCPathCacheEntry> {
public:
SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCPathCacheEntry);
- ~GrCCPathCacheEntry() override;
-
- uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; }
+ ~GrCCPathCacheEntry() {
+ SkASSERT(!fCurrFlushAtlas); // Client is required to reset fCurrFlushAtlas back to null.
+ this->invalidateAtlas();
+ }
// The number of times this specific entry (path + matrix combination) has been pulled from
// the path cache. As long as the caller does exactly one lookup per draw, this translates to
@@ -162,16 +191,16 @@
private:
using MaskTransform = GrCCPathCache::MaskTransform;
- GrCCPathCacheEntry(uint32_t pathCacheUniqueID, const MaskTransform&);
+ GrCCPathCacheEntry(sk_sp<GrCCPathCache::Key> cacheKey, const MaskTransform& maskTransform)
+ : fCacheKey(std::move(cacheKey)), fMaskTransform(maskTransform) {
+ }
// Resets this entry back to not having an atlas, and purges its previous atlas texture from the
// resource cache if needed.
void invalidateAtlas();
- // Called when our corresponding path is modified or deleted. Not threadsafe.
- void onChange() override;
+ sk_sp<GrCCPathCache::Key> fCacheKey;
- const uint32_t fPathCacheUniqueID;
MaskTransform fMaskTransform;
int fHitCount = 1;
@@ -188,8 +217,6 @@
// This field is for when a path gets drawn more than once during the same flush.
const GrCCAtlas* fCurrFlushAtlas = nullptr;
- SkDEBUGCODE(SkThreadID fGraphicsThreadID);
-
friend class GrCCPathCache;
friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&,
GrColor, DoEvenOddFill); // To access data.