/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrCCPathCache.h"

#include "GrShape.h"
#include "SkNx.h"

static constexpr int kMaxKeyDataCountU32 = 256;  // 1kB of uint32_t's.

DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);

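// Generates the unique ID that ties a GrCCPathCache to its message-bus inbox of invalidated
// keys. The loop simply retries in the unlikely event the counter wraps around to
// SK_InvalidUniqueID.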
static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}

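// SkMessageBus filter: an invalidated key is only delivered to the inbox of the path cache
// that created it, identified by the unique ID they share.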
static inline bool SkShouldPostMessageToBus(
        const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
    return key->pathCacheUniqueID() == msgBusUniqueID;
}

// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;

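// Captures the 2x2 portion of the view matrix. The integer part of the translate is returned
// via 'shift' (so a cached mask can simply be moved), while the subpixel remainder is kept for
// the fuzzy comparison below. The Android framework build skips the subpixel part because view
// matrix translates are pre-rounded there.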
GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
        : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
    SkASSERT(!m.hasPerspective());
    Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
    Sk2f transFloor;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    // On Android framework we pre-round view matrix translates to integers for better caching.
    transFloor = translate;
#else
    transFloor = translate.floor();
    (translate - transFloor).store(fSubpixelTranslate);
#endif
    shift->set((int)transFloor[0], (int)transFloor[1]);
    SkASSERT((float)shift->fX == transFloor[0]);  // Make sure transFloor had integer values.
    SkASSERT((float)shift->fY == transFloor[1]);
}

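// Two mask transforms are considered equivalent when their 2x2 matrices match exactly and
// (outside the Android framework build) their subpixel translates differ by no more than
// 1/256 of a pixel.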
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                const GrCCPathCache::MaskTransform& b) {
    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
        return false;
    }
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (((Sk2f::Load(a.fSubpixelTranslate) -
          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
        return false;
    }
#endif
    return true;
}

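// Keys are allocated as a single block: the fixed-size Key object followed immediately by a
// variable-length array of 'dataCountU32' uint32_t's (see data() below).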
sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
                                                   int dataCountU32, const void* data) {
    void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
    sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
    if (data) {
        memcpy(key->data(), data, key->dataSizeInBytes());
    }
    return key;
}

const uint32_t* GrCCPathCache::Key::data() const {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
}

uint32_t* GrCCPathCache::Key::data() {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
}

inline bool GrCCPathCache::Key::operator==(const GrCCPathCache::Key& that) const {
    return fDataSizeInBytes == that.fDataSizeInBytes &&
           !memcmp(this->data(), that.data(), fDataSizeInBytes);
}

void GrCCPathCache::Key::onChange() {
    // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
    SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
}

inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
        const GrCCPathCache::HashNode& node) {
    return *node.entry()->fCacheKey;
}

inline uint32_t GrCCPathCache::HashNode::Hash(const Key& key) {
    return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
}

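// A HashNode creates its cache entry and registers the key as a gen-ID change listener on the
// shape's path, so the entry is evicted (via Key::onChange above) if the path's contents are
// ever modified or the path is deleted.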
inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
                                         const MaskTransform& m, const GrShape& shape)
        : fPathCache(pathCache)
        , fEntry(new GrCCPathCacheEntry(key, m)) {
    SkASSERT(shape.hasUnstyledKey());
    shape.addGenIDChangeListener(std::move(key));
}

inline GrCCPathCache::HashNode::~HashNode() {
    this->willExitHashTable();
}

inline GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
    this->willExitHashTable();
    fPathCache = node.fPathCache;
    fEntry = std::move(node.fEntry);
    SkASSERT(!node.fEntry);
    return *this;
}

inline void GrCCPathCache::HashNode::willExitHashTable() {
    if (!fEntry) {
        return;  // We were moved.
    }

    SkASSERT(fPathCache);
    SkASSERT(fPathCache->fLRU.isInList(fEntry.get()));

    fEntry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
    fPathCache->fLRU.remove(fEntry.get());
}

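// The scratch key is preallocated at the maximum supported size and reused for every lookup,
// so find() never has to allocate a temporary key just to probe the hash table.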
GrCCPathCache::GrCCPathCache()
        : fInvalidatedKeysInbox(next_path_cache_id())
        , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
}

GrCCPathCache::~GrCCPathCache() {
    fHashTable.reset();  // Must be cleared first; ~HashNode calls fLRU.remove() on us.
    SkASSERT(fLRU.isEmpty());  // Ensure the hash table and LRU list were coherent.
}

namespace {

// Produces a key that accounts for both a shape's path geometry and any stroke/style.
class WriteKeyHelper {
public:
    static constexpr int kStrokeWidthIdx = 0;
    static constexpr int kStrokeMiterIdx = 1;
    static constexpr int kStrokeCapJoinIdx = 2;
    static constexpr int kShapeUnstyledKeyIdx = 3;

    WriteKeyHelper(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}

    // Returns the total number of uint32_t's to allocate for the key.
    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }

    // Writes the key data to out[].
    void write(const GrShape& shape, uint32_t* out) {
        // Stroke key.
        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
        // http://skbug.com/8273
        SkASSERT(!shape.style().hasPathEffect());
        const SkStrokeRec& stroke = shape.style().strokeRec();
        if (stroke.isFillStyle()) {
            // Use a value for width that won't collide with a valid fp32 value >= 0.
            out[kStrokeWidthIdx] = ~0;
            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
        } else {
            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
        }

        // Shape unstyled key.
        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
    }

private:
    int fShapeUnstyledKeyCount;
};

}

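// Looks up (and optionally creates) the cache entry for 'shape' under mask transform 'm':
//   1) Write the shape's key into the reusable scratch key and probe the hash table.
//   2) On a hit with an incompatible mask transform, either recycle the entry in place (if we
//      are its only owner and creation was requested) or evict it and treat this as a miss.
//   3) On a miss, optionally create a new entry, evicting the LRU tail if the cache is full.
//   4) Move the entry to the head of the LRU list, stamp it with the per-flush timestamp, and
//      bump its hit count.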
sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
                                              CreateIfAbsent createIfAbsent) {
    if (!shape.hasUnstyledKey()) {
        return nullptr;
    }

    WriteKeyHelper writeKeyHelper(shape);
    if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
        return nullptr;
    }

    SkASSERT(fScratchKey->unique());
    fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
    writeKeyHelper.write(shape, fScratchKey->data());

    GrCCPathCacheEntry* entry = nullptr;
    if (HashNode* node = fHashTable.find(*fScratchKey)) {
        entry = node->entry();
        SkASSERT(fLRU.isInList(entry));
        if (!fuzzy_equals(m, entry->fMaskTransform)) {
            // The path was reused with an incompatible matrix.
            if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
                // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
                entry->fMaskTransform = m;
                entry->fHitCount = 0;
                entry->invalidateAtlas();
                SkASSERT(!entry->fCurrFlushAtlas);  // Should be null because 'entry' is unique.
            } else {
                this->evict(*fScratchKey);
                entry = nullptr;
            }
        }
    }

    if (!entry) {
        if (CreateIfAbsent::kNo == createIfAbsent) {
            return nullptr;
        }
        if (fHashTable.count() >= kMaxCacheCount) {
            SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
            SkASSERT(node && node->entry() == fLRU.tail());
            this->evict(*fLRU.tail()->fCacheKey);  // We've exceeded our limit.
        }

        // Create a new entry in the cache.
        sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
                                            writeKeyHelper.allocCountU32(), fScratchKey->data());
        SkASSERT(*permanentKey == *fScratchKey);
        SkASSERT(!fHashTable.find(*permanentKey));
        entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();

        SkASSERT(fHashTable.count() <= kMaxCacheCount);
    } else {
        fLRU.remove(entry);  // Will be re-added at head.
    }

    SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
    SkASSERT(node && node->entry() == entry);
    fLRU.addToHead(entry);

    entry->fTimestamp = this->quickPerFlushTimestamp();
    ++entry->fHitCount;
    return sk_ref_sp(entry);
}

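// Flush-time and purge-time bookkeeping. doPostFlushProcessing() drains the inbox of
// invalidated keys and resets the cached per-flush timestamp, so the next cache access
// (presumably via quickPerFlushTimestamp(), not shown in this file) takes a fresh clock
// reading.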
void GrCCPathCache::doPostFlushProcessing() {
    this->purgeInvalidatedKeys();

    // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
    fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
}

void GrCCPathCache::purgeEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime) {
    this->purgeInvalidatedKeys();

#ifdef SK_DEBUG
    auto lastTimestamp = (fLRU.isEmpty())
            ? GrStdSteadyClock::time_point::max()
            : fLRU.tail()->fTimestamp;
#endif

    // Drop every cache entry whose timestamp is older than purgeTime.
    while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
#ifdef SK_DEBUG
        // Verify that fLRU is sorted by timestamp.
        auto timestamp = fLRU.tail()->fTimestamp;
        SkASSERT(timestamp >= lastTimestamp);
        lastTimestamp = timestamp;
#endif
        this->evict(*fLRU.tail()->fCacheKey);
    }
}

void GrCCPathCache::purgeInvalidatedKeys() {
    SkTArray<sk_sp<Key>> invalidatedKeys;
    fInvalidatedKeysInbox.poll(&invalidatedKeys);
    for (const sk_sp<Key>& key : invalidatedKeys) {
        bool isInCache = !key->shouldUnregisterFromPath();  // Gets set upon exiting the cache.
        if (isInCache) {
            this->evict(*key);
        }
    }
}

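// Points this entry at its mask in the atlas that was stashed (carried forward) from the
// previous flush. 'maskShift' is folded into the atlas offset and subtracted from the cached
// device bounds so they are relative to the mask's own origin.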
void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
                                            const SkIVector& atlasOffset, const SkRect& devBounds,
                                            const SkRect& devBounds45, const SkIRect& devIBounds,
                                            const SkIVector& maskShift) {
    SkASSERT(atlasKey.isValid());
    SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.

    fAtlasKey = atlasKey;
    fAtlasOffset = atlasOffset + maskShift;
    SkASSERT(!fCachedAtlasInfo);  // Otherwise they should have reused the cached atlas instead.

    float dx = (float)maskShift.fX, dy = (float)maskShift.fY;
    fDevBounds = devBounds.makeOffset(-dx, -dy);
    fDevBounds45 = GrCCPathProcessor::MakeOffset45(devBounds45, -dx, -dy);
    fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
}

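// Points this entry at its mask in a permanently cached atlas texture, and adds its pixel
// count to the shared CachedAtlasInfo so invalidateAtlas() below can decide when the texture
// as a whole is worth purging.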
void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
                                             const SkIVector& newAtlasOffset,
                                             sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
    SkASSERT(atlasKey.isValid());
    SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.

    fAtlasKey = atlasKey;
    fAtlasOffset = newAtlasOffset;

    SkASSERT(!fCachedAtlasInfo);  // Otherwise we need to invalidate our pixels in the old info.
    fCachedAtlasInfo = std::move(info);
    fCachedAtlasInfo->fNumPathPixels += this->height() * this->width();
}

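// Releases this entry's claim on its atlas pixels. Once at least half of a cached atlas's
// pixels have been invalidated, the whole texture is purged from the resource cache via the
// message bus.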
void GrCCPathCacheEntry::invalidateAtlas() {
    if (fCachedAtlasInfo) {
        // Mark our own pixels invalid in the cached atlas texture.
        fCachedAtlasInfo->fNumInvalidatedPathPixels += this->height() * this->width();
        if (!fCachedAtlasInfo->fIsPurgedFromResourceCache &&
            fCachedAtlasInfo->fNumInvalidatedPathPixels >= fCachedAtlasInfo->fNumPathPixels / 2) {
            // Too many invalidated pixels: purge the atlas texture from the resource cache.
            // The GrContext and CCPR path cache both share the same unique ID.
            SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                    GrUniqueKeyInvalidatedMessage(fAtlasKey, fCachedAtlasInfo->fContextUniqueID));
            fCachedAtlasInfo->fIsPurgedFromResourceCache = true;
        }
    }

    fAtlasKey.reset();
    fCachedAtlasInfo = nullptr;
}