/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrCCPathCache.h"

#include "GrShape.h"
#include "SkNx.h"

DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCacheEntry>);

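// Returns a unique, non-invalid ID for a new GrCCPathCache's inbox. The loop simply skips
// over SK_InvalidUniqueID if the atomic counter ever wraps around to it.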
static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}

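// SkMessageBus filter hook: an invalidated-entry message is only delivered to the path cache
// whose inbox ID matches the entry's pathCacheUniqueID().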
static inline bool SkShouldPostMessageToBus(
        const sk_sp<GrCCPathCacheEntry>& entry, uint32_t msgBusUniqueID) {
    return entry->pathCacheUniqueID() == msgBusUniqueID;
}

// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;


GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
        : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
    SkASSERT(!m.hasPerspective());
    Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
    Sk2f transFloor;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    // On Android framework we pre-round view matrix translates to integers for better caching.
    transFloor = translate;
#else
    transFloor = translate.floor();
    (translate - transFloor).store(fSubpixelTranslate);
#endif
    shift->set((int)transFloor[0], (int)transFloor[1]);
    SkASSERT((float)shift->fX == transFloor[0]);  // Make sure transFloor had integer values.
    SkASSERT((float)shift->fY == transFloor[1]);
}

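// Two MaskTransforms are considered interchangeable if their 2x2 matrices match exactly and
// (outside the Android framework) their subpixel translates differ by no more than 1/256 of a
// pixel in each axis.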
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                const GrCCPathCache::MaskTransform& b) {
    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
        return false;
    }
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (((Sk2f::Load(a.fSubpixelTranslate) -
          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
        return false;
    }
#endif
    return true;
}

namespace {

// Produces a key that accounts both for a shape's path geometry, as well as any stroke/style.
class WriteStyledKey {
public:
    static constexpr int kStyledKeySizeInBytesIdx = 0;
    static constexpr int kStrokeWidthIdx = 1;
    static constexpr int kStrokeMiterIdx = 2;
    static constexpr int kStrokeCapJoinIdx = 3;
    static constexpr int kShapeUnstyledKeyIdx = 4;

    static constexpr int kStrokeKeyCount = 3;  // [width, miterLimit, cap|join].

    WriteStyledKey(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}

    // Returns the total number of uint32_t's to allocate for the key.
    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }

    // Writes the key to out[].
    void write(const GrShape& shape, uint32_t* out) {
        out[kStyledKeySizeInBytesIdx] =
                (kStrokeKeyCount + fShapeUnstyledKeyCount) * sizeof(uint32_t);

        // Stroke key.
        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
        // http://skbug.com/8273
        SkASSERT(!shape.style().hasPathEffect());
        const SkStrokeRec& stroke = shape.style().strokeRec();
        if (stroke.isFillStyle()) {
            // Use a value for width that won't collide with a valid fp32 value >= 0.
            out[kStrokeWidthIdx] = ~0;
            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
        } else {
            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
        }

        // Shape unstyled key.
        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
    }

private:
    int fShapeUnstyledKeyCount;
};

}

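// A HashKey wraps the styled key written above: fData[0] is the key size in bytes, and the
// remaining words are the stroke key followed by the shape's unstyled key. The key itself is
// stored as a variable-length footer immediately after each GrCCPathCacheEntry allocation.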
inline bool operator==(const GrCCPathCache::HashKey& key1, const GrCCPathCache::HashKey& key2) {
    return key1.fData[0] == key2.fData[0] && !memcmp(&key1.fData[1], &key2.fData[1], key1.fData[0]);
}

inline GrCCPathCache::HashKey GrCCPathCache::HashNode::GetKey(const GrCCPathCacheEntry* entry) {
    // The shape key is a variable-length footer to the entry allocation.
    return HashKey{(const uint32_t*)((const char*)entry + sizeof(GrCCPathCacheEntry))};
}

inline uint32_t GrCCPathCache::HashNode::Hash(HashKey key) {
    return GrResourceKeyHash(&key.fData[1], key.fData[0]);
}

inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, const MaskTransform& m,
                                         const GrShape& shape)
        : fPathCache(pathCache) {
    SkASSERT(SkGetThreadID() == fPathCache->fGraphicsThreadID);
    SkASSERT(shape.hasUnstyledKey());

    WriteStyledKey writeKey(shape);
    void* mem = ::operator new (sizeof(GrCCPathCacheEntry) +
                                writeKey.allocCountU32() * sizeof(uint32_t));
    fEntry.reset(new (mem) GrCCPathCacheEntry(fPathCache->fInvalidatedEntriesInbox.uniqueID(), m));

    // The shape key is a variable-length footer to the entry allocation.
    uint32_t* keyData = (uint32_t*)((char*)mem + sizeof(GrCCPathCacheEntry));
    writeKey.write(shape, keyData);
}

inline GrCCPathCache::HashNode::~HashNode() {
    this->willExitHashTable();
}

inline GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
    this->willExitHashTable();
    fPathCache = node.fPathCache;
    fEntry = std::move(node.fEntry);
    SkASSERT(!node.fEntry);
    return *this;
}

inline void GrCCPathCache::HashNode::willExitHashTable() {
    if (!fEntry) {
        return;  // We were moved.
    }

    SkASSERT(fPathCache);
    SkASSERT(SkGetThreadID() == fPathCache->fGraphicsThreadID);
    SkASSERT(fPathCache->fLRU.isInList(fEntry.get()));

    fEntry->invalidateAtlas();  // Must happen on graphics thread.
    fPathCache->fLRU.remove(fEntry.get());
}


GrCCPathCache::GrCCPathCache()
        : fInvalidatedEntriesInbox(next_path_cache_id()) {
}

GrCCPathCache::~GrCCPathCache() {
    // DDL threads never use the cache, in which case it doesn't matter what thread we
    // clean up on.
    SkASSERT(kIllegalThreadID == fGraphicsThreadID || SkGetThreadID() == fGraphicsThreadID);

    fHashTable.reset();  // Must be cleared first; ~HashNode calls fLRU.remove() on us.
    SkASSERT(fLRU.isEmpty());  // Ensure the hash table and LRU list were coherent.
}

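// Looks up (and optionally creates) the cache entry for 'shape' under mask transform 'm'.
// A hit with a compatible transform bumps the entry's hit count; a hit whose transform no
// longer matches is either recycled in place (if the entry is unique) or evicted. Entries are
// kept in LRU order, and the least recently used entry is evicted once the table would exceed
// kMaxCacheCount.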
sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
                                              CreateIfAbsent createIfAbsent) {
#ifdef SK_DEBUG
    if (kIllegalThreadID == fGraphicsThreadID) {
        fGraphicsThreadID = SkGetThreadID();
    }
#endif
    SkASSERT(SkGetThreadID() == fGraphicsThreadID);

    if (!shape.hasUnstyledKey()) {
        return nullptr;
    }

    WriteStyledKey writeKey(shape);
    SkAutoSTMalloc<GrShape::kMaxKeyFromDataVerbCnt * 4, uint32_t> keyData(writeKey.allocCountU32());
    writeKey.write(shape, keyData.get());

    GrCCPathCacheEntry* entry = nullptr;
    if (HashNode* node = fHashTable.find({keyData.get()})) {
        entry = node->entry();
        SkASSERT(fLRU.isInList(entry));
        if (fuzzy_equals(m, entry->fMaskTransform)) {
            ++entry->fHitCount;  // The path was reused with a compatible matrix.
        } else if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
            // This entry is unique: we can recycle it instead of deleting and malloc-ing a new one.
            entry->fMaskTransform = m;
            entry->fHitCount = 1;
            entry->invalidateAtlas();
            SkASSERT(!entry->fCurrFlushAtlas);  // Should be null because 'entry' is unique.
        } else {
            this->evict(entry);
            entry = nullptr;
        }
    }

    if (!entry) {
        if (CreateIfAbsent::kNo == createIfAbsent) {
            return nullptr;
        }
        if (fHashTable.count() >= kMaxCacheCount) {
            this->evict(fLRU.tail());  // We've exceeded our limit.
        }
        SkASSERT(!fHashTable.find({keyData.get()}));
        entry = fHashTable.set(HashNode(this, m, shape))->entry();
        shape.addGenIDChangeListener(sk_ref_sp(entry));
        SkASSERT(fHashTable.count() <= kMaxCacheCount);
    } else {
        fLRU.remove(entry);  // Will be re-added at head.
    }

    SkDEBUGCODE(HashNode* node = fHashTable.find(HashNode::GetKey(entry)));
    SkASSERT(node && node->entry() == entry);
    fLRU.addToHead(entry);
    return sk_ref_sp(entry);
}

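// Removes 'entry' from the hash table if it is still present. ~HashNode (via
// willExitHashTable()) then invalidates the entry's atlas and unlinks it from the LRU list.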
void GrCCPathCache::evict(GrCCPathCacheEntry* entry) {
    SkASSERT(SkGetThreadID() == fGraphicsThreadID);

    bool isInCache = entry->fNext || (fLRU.tail() == entry);
    SkASSERT(isInCache == fLRU.isInList(entry));
    if (isInCache) {
        SkDEBUGCODE(HashNode* node = fHashTable.find(HashNode::GetKey(entry)));
        SkASSERT(node && node->entry() == entry);
        fHashTable.remove(HashNode::GetKey(entry));
        // HashNode::willExitHashTable() takes care of the rest.
    }
}

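// Drains the thread-safe inbox of entries whose paths have been modified or deleted (see
// onChange() below) and evicts them here, on the graphics thread.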
void GrCCPathCache::purgeAsNeeded() {
    SkTArray<sk_sp<GrCCPathCacheEntry>> invalidatedEntries;
    fInvalidatedEntriesInbox.poll(&invalidatedEntries);
    for (const sk_sp<GrCCPathCacheEntry>& entry : invalidatedEntries) {
        this->evict(entry.get());
    }
}


GrCCPathCacheEntry::GrCCPathCacheEntry(uint32_t pathCacheUniqueID,
                                       const MaskTransform& maskTransform)
        : fPathCacheUniqueID(pathCacheUniqueID), fMaskTransform(maskTransform) {
    SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
    SkDEBUGCODE(fGraphicsThreadID = SkGetThreadID());
}

GrCCPathCacheEntry::~GrCCPathCacheEntry() {
    // This might get called from a different thread.
    SkASSERT(!fCachedAtlasInfo);  // Should have been cleared when the entry was evicted.
    SkASSERT(!fCurrFlushAtlas);  // Client is required to reset fCurrFlushAtlas back to null.
}

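// Records that this path's mask was rendered into the atlas stashed from the previous flush.
// The mask shift is folded into the atlas offset and subtracted from the stored device bounds
// so they are expressed relative to the mask's location in the atlas.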
void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
                                            const SkIVector& atlasOffset, const SkRect& devBounds,
                                            const SkRect& devBounds45, const SkIRect& devIBounds,
                                            const SkIVector& maskShift) {
    SkASSERT(SkGetThreadID() == fGraphicsThreadID);
    SkASSERT(atlasKey.isValid());
    SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.

    fAtlasKey = atlasKey;
    fAtlasOffset = atlasOffset + maskShift;
    SkASSERT(!fCachedAtlasInfo);  // Otherwise they should have reused the cached atlas instead.

    float dx = (float)maskShift.fX, dy = (float)maskShift.fY;
    fDevBounds = devBounds.makeOffset(-dx, -dy);
    fDevBounds45 = GrCCPathProcessor::MakeOffset45(devBounds45, -dx, -dy);
    fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
}

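// Points this entry at a mask that now lives in a cached atlas texture, and adds the path's
// pixel count to that atlas's bookkeeping (used later to decide when to purge the atlas).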
void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
                                             const SkIVector& newAtlasOffset,
                                             sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
    SkASSERT(SkGetThreadID() == fGraphicsThreadID);
    SkASSERT(atlasKey.isValid());
    SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.

    fAtlasKey = atlasKey;
    fAtlasOffset = newAtlasOffset;

    SkASSERT(!fCachedAtlasInfo);  // Otherwise we need to invalidate our pixels in the old info.
    fCachedAtlasInfo = std::move(info);
    fCachedAtlasInfo->fNumPathPixels += this->height() * this->width();
}

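// Marks this entry's pixels invalid in its cached atlas, if it has one. Once at least half of
// an atlas's pixels have been invalidated this way, the entire atlas texture is purged from
// the resource cache.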
void GrCCPathCacheEntry::invalidateAtlas() {
    SkASSERT(SkGetThreadID() == fGraphicsThreadID);

    if (fCachedAtlasInfo) {
        // Mark our own pixels invalid in the cached atlas texture.
        fCachedAtlasInfo->fNumInvalidatedPathPixels += this->height() * this->width();
        if (!fCachedAtlasInfo->fIsPurgedFromResourceCache &&
            fCachedAtlasInfo->fNumInvalidatedPathPixels >= fCachedAtlasInfo->fNumPathPixels / 2) {
            // Too many invalidated pixels: purge the atlas texture from the resource cache.
            // The GrContext and CCPR path cache both share the same unique ID.
            SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                    GrUniqueKeyInvalidatedMessage(fAtlasKey, fCachedAtlasInfo->fContextUniqueID));
            fCachedAtlasInfo->fIsPurgedFromResourceCache = true;
        }
    }

    fAtlasKey.reset();
    fCachedAtlasInfo = nullptr;
}

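// Called when the path's generation ID changes (i.e. the path was modified or destroyed).
// This can happen on any thread, so rather than touching the cache directly we post an
// eviction message that the owning GrCCPathCache drains in purgeAsNeeded().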
void GrCCPathCacheEntry::onChange() {
    // Post a thread-safe eviction message.
    SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Post(sk_ref_sp(this));
}