/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrCCPathCache.h"

#include "GrShape.h"
#include "SkNx.h"

static constexpr int kMaxKeyDataCountU32 = 256; // 1kB of uint32_t's.

DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);

static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}

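// SkMessageBus uses this overload to filter message delivery: an invalidated key is only posted
// to the inbox whose unique ID matches the key's owning path cache.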
static inline bool SkShouldPostMessageToBus(
        const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
    return key->pathCacheUniqueID() == msgBusUniqueID;
}

// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;


GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
        : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
    SkASSERT(!m.hasPerspective());
    Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
    Sk2f transFloor;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    // On the Android framework we pre-round view matrix translates to integers for better
    // caching.
    transFloor = translate;
#else
    transFloor = translate.floor();
    (translate - transFloor).store(fSubpixelTranslate);
#endif
    shift->set((int)transFloor[0], (int)transFloor[1]);
    SkASSERT((float)shift->fX == transFloor[0]); // Make sure transFloor had integer values.
    SkASSERT((float)shift->fY == transFloor[1]);
}

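// A cached mask is reusable only if the 2x2 part of the view matrix matches exactly and (outside
// the Android framework build) the subpixel translations differ by no more than 1/256 of a pixel.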
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                const GrCCPathCache::MaskTransform& b) {
    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
        return false;
    }
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (((Sk2f::Load(a.fSubpixelTranslate) -
          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
        return false;
    }
#endif
    return true;
}

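// A Key is allocated as a single block: the fixed-size Key header followed immediately by
// 'dataCountU32' uint32_t's of key data. data() returns a pointer just past the header.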
sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
                                                   int dataCountU32, const void* data) {
    void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
    sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
    if (data) {
        memcpy(key->data(), data, key->dataSizeInBytes());
    }
    return key;
}

const uint32_t* GrCCPathCache::Key::data() const {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
}

uint32_t* GrCCPathCache::Key::data() {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
}

inline bool GrCCPathCache::Key::operator==(const GrCCPathCache::Key& that) const {
    return fDataSizeInBytes == that.fDataSizeInBytes &&
           !memcmp(this->data(), that.data(), fDataSizeInBytes);
}

void GrCCPathCache::Key::onChange() {
    // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
    SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
}

inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
        const GrCCPathCache::HashNode& node) {
    return *node.entry()->fCacheKey;
}

inline uint32_t GrCCPathCache::HashNode::Hash(const Key& key) {
    return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
}

inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
                                         const MaskTransform& m, const GrShape& shape)
        : fPathCache(pathCache)
        , fEntry(new GrCCPathCacheEntry(key, m)) {
    SkASSERT(shape.hasUnstyledKey());
    shape.addGenIDChangeListener(std::move(key));
}

inline GrCCPathCache::HashNode::~HashNode() {
    this->willExitHashTable();
}

inline GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
    this->willExitHashTable();
    fPathCache = node.fPathCache;
    fEntry = std::move(node.fEntry);
    SkASSERT(!node.fEntry);
    return *this;
}

inline void GrCCPathCache::HashNode::willExitHashTable() {
    if (!fEntry) {
        return; // We were moved.
    }

    SkASSERT(fPathCache);
    SkASSERT(fPathCache->fLRU.isInList(fEntry.get()));

    fEntry->fCacheKey->markShouldUnregisterFromPath(); // Unregister the path listener.
    fPathCache->fLRU.remove(fEntry.get());
}


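// The scratch key is preallocated at the maximum supported size (kMaxKeyDataCountU32) and reused
// for every lookup, so find() only allocates a new key when it actually inserts a new entry.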
GrCCPathCache::GrCCPathCache()
        : fInvalidatedKeysInbox(next_path_cache_id())
        , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
}

GrCCPathCache::~GrCCPathCache() {
    fHashTable.reset(); // Must be cleared first; ~HashNode calls fLRU.remove() on us.
    SkASSERT(fLRU.isEmpty()); // Ensure the hash table and LRU list were coherent.
}

namespace {

// Produces a key that accounts for both a shape's path geometry and any stroke/style.
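// Key layout, in uint32_t's:
//   [kStrokeWidthIdx]        stroke width as float bits (~0 for fills)
//   [kStrokeMiterIdx]        miter limit as float bits
//   [kStrokeCapJoinIdx]      (cap << 16) | join
//   [kShapeUnstyledKeyIdx..] the shape's unstyled key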
class WriteKeyHelper {
public:
    static constexpr int kStrokeWidthIdx = 0;
    static constexpr int kStrokeMiterIdx = 1;
    static constexpr int kStrokeCapJoinIdx = 2;
    static constexpr int kShapeUnstyledKeyIdx = 3;

    WriteKeyHelper(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}

    // Returns the total number of uint32_t's to allocate for the key.
    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }

    // Writes the key data to out[].
    void write(const GrShape& shape, uint32_t* out) {
        // Stroke key.
        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
        // http://skbug.com/8273
        SkASSERT(!shape.style().hasPathEffect());
        const SkStrokeRec& stroke = shape.style().strokeRec();
        if (stroke.isFillStyle()) {
            // Use a value for width that won't collide with a valid fp32 value >= 0.
            out[kStrokeWidthIdx] = ~0;
            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
        } else {
            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
        }

        // Shape unstyled key.
        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
    }

private:
    int fShapeUnstyledKeyCount;
};

}

sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
                                              CreateIfAbsent createIfAbsent) {
    if (!shape.hasUnstyledKey()) {
        return nullptr;
    }

    WriteKeyHelper writeKeyHelper(shape);
    if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
        return nullptr;
    }

    SkASSERT(fScratchKey->unique());
    fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
    writeKeyHelper.write(shape, fScratchKey->data());

    GrCCPathCacheEntry* entry = nullptr;
    if (HashNode* node = fHashTable.find(*fScratchKey)) {
        entry = node->entry();
        SkASSERT(fLRU.isInList(entry));
        if (fuzzy_equals(m, entry->fMaskTransform)) {
            ++entry->fHitCount; // The path was reused with a compatible matrix.
        } else if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
            // This entry is unique: we can recycle it instead of deleting and malloc-ing a new one.
            entry->fMaskTransform = m;
            entry->fHitCount = 1;
            entry->invalidateAtlas();
            SkASSERT(!entry->fCurrFlushAtlas); // Should be null because 'entry' is unique.
        } else {
            this->evict(*fScratchKey);
            entry = nullptr;
        }
    }

    if (!entry) {
        if (CreateIfAbsent::kNo == createIfAbsent) {
            return nullptr;
        }
        if (fHashTable.count() >= kMaxCacheCount) {
            SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
            SkASSERT(node && node->entry() == fLRU.tail());
            this->evict(*fLRU.tail()->fCacheKey); // We've exceeded our limit.
        }

        // Create a new entry in the cache.
        sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
                                            writeKeyHelper.allocCountU32(), fScratchKey->data());
        SkASSERT(*permanentKey == *fScratchKey);
        SkASSERT(!fHashTable.find(*permanentKey));
        entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();

        SkASSERT(fHashTable.count() <= kMaxCacheCount);
    } else {
        fLRU.remove(entry); // Will be re-added at head.
    }

    SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
    SkASSERT(node && node->entry() == entry);
    fLRU.addToHead(entry);
    return sk_ref_sp(entry);
}

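// Drains the thread-safe inbox of keys whose paths have been invalidated, and evicts any
// corresponding entries that are still in the cache.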
void GrCCPathCache::purgeAsNeeded() {
    SkTArray<sk_sp<Key>> invalidatedKeys;
    fInvalidatedKeysInbox.poll(&invalidatedKeys);
    for (const sk_sp<Key>& key : invalidatedKeys) {
        bool isInCache = !key->shouldUnregisterFromPath(); // Gets set upon exiting the cache.
        if (isInCache) {
            this->evict(*key);
        }
    }
}


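// Records this entry's mask location in the atlas stashed from the previous flush. The stored
// device-space bounds are translated by -maskShift so they are relative to the mask's own
// integer origin.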
void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
                                            const SkIVector& atlasOffset, const SkRect& devBounds,
                                            const SkRect& devBounds45, const SkIRect& devIBounds,
                                            const SkIVector& maskShift) {
    SkASSERT(atlasKey.isValid());
    SkASSERT(!fCurrFlushAtlas); // Otherwise we should reuse the atlas from last time.

    fAtlasKey = atlasKey;
    fAtlasOffset = atlasOffset + maskShift;
    SkASSERT(!fCachedAtlasInfo); // Otherwise they should have reused the cached atlas instead.

    float dx = (float)maskShift.fX, dy = (float)maskShift.fY;
    fDevBounds = devBounds.makeOffset(-dx, -dy);
    fDevBounds45 = GrCCPathProcessor::MakeOffset45(devBounds45, -dx, -dy);
    fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
}

void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
                                             const SkIVector& newAtlasOffset,
                                             sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
    SkASSERT(atlasKey.isValid());
    SkASSERT(!fCurrFlushAtlas); // Otherwise we should reuse the atlas from last time.

    fAtlasKey = atlasKey;
    fAtlasOffset = newAtlasOffset;

    SkASSERT(!fCachedAtlasInfo); // Otherwise we need to invalidate our pixels in the old info.
    fCachedAtlasInfo = std::move(info);
    fCachedAtlasInfo->fNumPathPixels += this->height() * this->width();
}

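// Detaches this entry from its cached atlas and counts its pixels as invalidated. Once at least
// half of an atlas's path pixels have been invalidated, the atlas texture itself is purged from
// the GrContext's resource cache.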
void GrCCPathCacheEntry::invalidateAtlas() {
    if (fCachedAtlasInfo) {
        // Mark our own pixels invalid in the cached atlas texture.
        fCachedAtlasInfo->fNumInvalidatedPathPixels += this->height() * this->width();
        if (!fCachedAtlasInfo->fIsPurgedFromResourceCache &&
            fCachedAtlasInfo->fNumInvalidatedPathPixels >= fCachedAtlasInfo->fNumPathPixels / 2) {
            // Too many invalidated pixels: purge the atlas texture from the resource cache.
            // The GrContext and CCPR path cache both share the same unique ID.
            SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                    GrUniqueKeyInvalidatedMessage(fAtlasKey, fCachedAtlasInfo->fContextUniqueID));
            fCachedAtlasInfo->fIsPurgedFromResourceCache = true;
        }
    }

    fAtlasKey.reset();
    fCachedAtlasInfo = nullptr;
}