/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrCCPathCache.h"

#include "GrShape.h"
#include "SkNx.h"

DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCacheEntry>);

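// Produces a unique, non-zero ID for a new GrCCPathCache. The ID doubles as the SkMessageBus
// inbox ID through which invalidated entries find their way back to the cache that owns them.
// Skips SK_InvalidUniqueID in case the 32-bit counter ever wraps around.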
static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}

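// SkMessageBus routing hook: an invalidated entry is only delivered to the inbox of the
// GrCCPathCache that created it, identified by the unique ID they share.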
static inline bool SkShouldPostMessageToBus(
        const sk_sp<GrCCPathCacheEntry>& entry, uint32_t msgBusUniqueID) {
    return entry->pathCacheUniqueID() == msgBusUniqueID;
}

// The maximum number of entries we allow in our path cache before we start evicting the least
// recently used ones.
static constexpr int kMaxCacheCount = 1 << 16;


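// MaskTransform records what must match for a cached mask to be reused under a different view
// matrix: the 2x2 linear portion and, outside the Android framework build, the subpixel part of
// the translation. The whole-pixel part of the translation is returned to the caller via 'shift'.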
GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
        : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
    SkASSERT(!m.hasPerspective());
    Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
    Sk2f transFloor;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    // On Android framework we pre-round view matrix translates to integers for better caching.
    transFloor = translate;
#else
    transFloor = translate.floor();
    (translate - transFloor).store(fSubpixelTranslate);
#endif
    shift->set((int)transFloor[0], (int)transFloor[1]);
    SkASSERT((float)shift->fX == transFloor[0]);  // Make sure transFloor had integer values.
    SkASSERT((float)shift->fY == transFloor[1]);
}

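// Two mask transforms are compatible if their 2x2 matrices are identical and, outside the Android
// framework build, their subpixel translations differ by no more than 1/256 of a pixel per axis.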
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                const GrCCPathCache::MaskTransform& b) {
    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
        return false;
    }
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (((Sk2f::Load(a.fSubpixelTranslate) -
          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
        return false;
    }
#endif
    return true;
}

namespace {

// Produces a key that accounts both for a shape's path geometry, as well as any stroke/style.
class WriteStyledKey {
public:
    static constexpr int kStyledKeySizeInBytesIdx = 0;
    static constexpr int kStrokeWidthIdx = 1;
    static constexpr int kStrokeMiterIdx = 2;
    static constexpr int kStrokeCapJoinIdx = 3;
    static constexpr int kShapeUnstyledKeyIdx = 4;

    static constexpr int kStrokeKeyCount = 3;  // [width, miterLimit, cap|join].

    WriteStyledKey(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}

    // Returns the total number of uint32_t's to allocate for the key.
    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }

    // Writes the key to out[].
    void write(const GrShape& shape, uint32_t* out) {
        out[kStyledKeySizeInBytesIdx] =
                (kStrokeKeyCount + fShapeUnstyledKeyCount) * sizeof(uint32_t);

        // Stroke key.
        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
        // http://skbug.com/8273
        SkASSERT(!shape.style().hasPathEffect());
        const SkStrokeRec& stroke = shape.style().strokeRec();
        if (stroke.isFillStyle()) {
            // Use a value for width that won't collide with a valid fp32 value >= 0.
            out[kStrokeWidthIdx] = ~0;
            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
        } else {
            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
        }

        // Shape unstyled key.
        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
    }

private:
    int fShapeUnstyledKeyCount;
};

}  // namespace

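// A HashKey wraps a styled key produced by WriteStyledKey: fData[0] holds the size in bytes of
// the key data that begins at fData[1].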
inline bool operator==(const GrCCPathCache::HashKey& key1, const GrCCPathCache::HashKey& key2) {
    return key1.fData[0] == key2.fData[0] && !memcmp(&key1.fData[1], &key2.fData[1], key1.fData[0]);
}

inline GrCCPathCache::HashKey GrCCPathCache::HashNode::GetKey(const GrCCPathCacheEntry* entry) {
    // The shape key is a variable-length footer to the entry allocation.
    return HashKey{(const uint32_t*)((const char*)entry + sizeof(GrCCPathCacheEntry))};
}

inline uint32_t GrCCPathCache::HashNode::Hash(HashKey key) {
    return GrResourceKeyHash(&key.fData[1], key.fData[0]);
}

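// Creates the cache entry for a new path. The GrCCPathCacheEntry and its variable-length styled
// key are placed in a single allocation, with the key stored as a footer directly after the entry.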
inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, const MaskTransform& m,
                                         const GrShape& shape)
        : fPathCache(pathCache) {
    SkASSERT(SkGetThreadID() == fPathCache->fGraphicsThreadID);
    SkASSERT(shape.hasUnstyledKey());

    WriteStyledKey writeKey(shape);
    void* mem = ::operator new (sizeof(GrCCPathCacheEntry) +
                                writeKey.allocCountU32() * sizeof(uint32_t));
    fEntry.reset(new (mem) GrCCPathCacheEntry(fPathCache->fInvalidatedEntriesInbox.uniqueID(), m));

    // The shape key is a variable-length footer to the entry allocation.
    uint32_t* keyData = (uint32_t*)((char*)mem + sizeof(GrCCPathCacheEntry));
    writeKey.write(shape, keyData);
}

inline GrCCPathCache::HashNode::~HashNode() {
    this->willExitHashTable();
}

inline GrCCPathCache::HashNode& GrCCPathCache::HashNode::operator=(HashNode&& node) {
    this->willExitHashTable();
    fPathCache = node.fPathCache;
    fEntry = std::move(node.fEntry);
    SkASSERT(!node.fEntry);
    return *this;
}

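// Called whenever this node is about to leave the hash table (destroyed, evicted, or overwritten
// by move-assignment). Releases the entry's atlas data, flags the entry to unregister itself from
// its SkPath, and removes it from the LRU list.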
inline void GrCCPathCache::HashNode::willExitHashTable() {
    if (!fEntry) {
        return;  // We were moved.
    }

    SkASSERT(fPathCache);
    SkASSERT(SkGetThreadID() == fPathCache->fGraphicsThreadID);
    SkASSERT(fPathCache->fLRU.isInList(fEntry.get()));

    fEntry->invalidateAtlas();  // Must happen on graphics thread.
    fEntry->markShouldUnregisterFromPath();
    fPathCache->fLRU.remove(fEntry.get());
}


GrCCPathCache::GrCCPathCache()
        : fInvalidatedEntriesInbox(next_path_cache_id()) {
}

GrCCPathCache::~GrCCPathCache() {
    // DDL threads never use the cache, so if it was never used at all (fGraphicsThreadID was
    // never set), it doesn't matter which thread we clean up on.
    SkASSERT(kIllegalThreadID == fGraphicsThreadID || SkGetThreadID() == fGraphicsThreadID);

    fHashTable.reset();  // Must be cleared first; ~HashNode calls fLRU.remove() on us.
    SkASSERT(fLRU.isEmpty());  // Ensure the hash table and LRU list were coherent.
}

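// Finds the cache entry for 'shape' under mask transform 'm', optionally creating it if absent.
// A hit with a compatible transform bumps the entry's hit count; a hit whose transform no longer
// matches either recycles the entry in place (when a new entry was requested and the cache holds
// the only reference) or evicts it. Newly created entries register themselves as gen-ID change
// listeners on the path, so editing the path later evicts the entry.
//
// A typical lookup looks roughly like this (sketch only; the real call site lives in CCPR's
// draw-op code, and the variable names here are illustrative):
//
//     SkIVector maskShift;
//     GrCCPathCache::MaskTransform m(viewMatrix, &maskShift);
//     sk_sp<GrCCPathCacheEntry> cacheEntry =
//             pathCache->find(shape, m, GrCCPathCache::CreateIfAbsent::kYes);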
sk_sp<GrCCPathCacheEntry> GrCCPathCache::find(const GrShape& shape, const MaskTransform& m,
                                              CreateIfAbsent createIfAbsent) {
#ifdef SK_DEBUG
    if (kIllegalThreadID == fGraphicsThreadID) {
        fGraphicsThreadID = SkGetThreadID();
    }
#endif
    SkASSERT(SkGetThreadID() == fGraphicsThreadID);

    if (!shape.hasUnstyledKey()) {
        return nullptr;
    }

    WriteStyledKey writeKey(shape);
    SkAutoSTMalloc<GrShape::kMaxKeyFromDataVerbCnt * 4, uint32_t> keyData(writeKey.allocCountU32());
    writeKey.write(shape, keyData.get());

    GrCCPathCacheEntry* entry = nullptr;
    if (HashNode* node = fHashTable.find({keyData.get()})) {
        entry = node->entry();
        SkASSERT(fLRU.isInList(entry));
        if (fuzzy_equals(m, entry->fMaskTransform)) {
            ++entry->fHitCount;  // The path was reused with a compatible matrix.
        } else if (CreateIfAbsent::kYes == createIfAbsent && entry->unique()) {
            // This entry is unique: we can recycle it instead of deleting and malloc-ing a new one.
            entry->fMaskTransform = m;
            entry->fHitCount = 1;
            entry->invalidateAtlas();
            SkASSERT(!entry->fCurrFlushAtlas);  // Should be null because 'entry' is unique.
        } else {
            this->evict(entry);
            entry = nullptr;
        }
    }

    if (!entry) {
        if (CreateIfAbsent::kNo == createIfAbsent) {
            return nullptr;
        }
        if (fHashTable.count() >= kMaxCacheCount) {
            this->evict(fLRU.tail());  // We've exceeded our limit.
        }
        SkASSERT(!fHashTable.find({keyData.get()}));
        entry = fHashTable.set(HashNode(this, m, shape))->entry();
        shape.addGenIDChangeListener(sk_ref_sp(entry));
        SkASSERT(fHashTable.count() <= kMaxCacheCount);
    } else {
        fLRU.remove(entry);  // Will be re-added at head.
    }

    SkDEBUGCODE(HashNode* node = fHashTable.find(HashNode::GetKey(entry)));
    SkASSERT(node && node->entry() == entry);
    fLRU.addToHead(entry);
    return sk_ref_sp(entry);
}

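// Removes 'entry' from the hash table and LRU list, unless it has already been evicted (entries
// are flagged with markShouldUnregisterFromPath() on eviction, so a second call is a no-op).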
void GrCCPathCache::evict(GrCCPathCacheEntry* entry) {
    // Has the entry already been evicted? (We mark "shouldUnregisterFromPath" upon eviction.)
    bool isInCache = !entry->shouldUnregisterFromPath();
    SkDEBUGCODE(HashNode* entryKeyNode = fHashTable.find(HashNode::GetKey(entry)));
    SkASSERT((entryKeyNode && entryKeyNode->entry() == entry) == isInCache);
    SkASSERT(fLRU.isInList(entry) == isInCache);

    if (isInCache) {
        fHashTable.remove(HashNode::GetKey(entry));
        // HashNode::willExitHashTable() takes care of the rest.
    }
}

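// Drains the thread-safe inbox of invalidated entries (posted by onChange(), possibly from other
// threads) and evicts each one. Eviction touches atlas state and must happen on the graphics
// thread, which is why onChange() defers to this method instead of evicting directly.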
void GrCCPathCache::purgeAsNeeded() {
    SkTArray<sk_sp<GrCCPathCacheEntry>> invalidatedEntries;
    fInvalidatedEntriesInbox.poll(&invalidatedEntries);
    for (const sk_sp<GrCCPathCacheEntry>& entry : invalidatedEntries) {
        this->evict(entry.get());
    }
}


GrCCPathCacheEntry::GrCCPathCacheEntry(uint32_t pathCacheUniqueID,
                                       const MaskTransform& maskTransform)
        : fPathCacheUniqueID(pathCacheUniqueID), fMaskTransform(maskTransform) {
    SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
    SkDEBUGCODE(fGraphicsThreadID = SkGetThreadID());
}

GrCCPathCacheEntry::~GrCCPathCacheEntry() {
    // This might get called from a different thread.
    SkASSERT(!fCachedAtlasInfo);  // Should have been cleared when the entry was evicted.
    SkASSERT(!fCurrFlushAtlas);  // Client is required to reset fCurrFlushAtlas back to null.
}

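// Adopts the mask that was "stashed" in the atlas from the previous flush. The atlas offset is
// adjusted by 'maskShift', and the device-space bounds are stored with 'maskShift' subtracted so
// they are relative to the mask's own origin.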
void GrCCPathCacheEntry::initAsStashedAtlas(const GrUniqueKey& atlasKey,
                                            const SkIVector& atlasOffset, const SkRect& devBounds,
                                            const SkRect& devBounds45, const SkIRect& devIBounds,
                                            const SkIVector& maskShift) {
    SkASSERT(SkGetThreadID() == fGraphicsThreadID);
    SkASSERT(atlasKey.isValid());
    SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.

    fAtlasKey = atlasKey;
    fAtlasOffset = atlasOffset + maskShift;
    SkASSERT(!fCachedAtlasInfo);  // Otherwise they should have reused the cached atlas instead.

    float dx = (float)maskShift.fX, dy = (float)maskShift.fY;
    fDevBounds = devBounds.makeOffset(-dx, -dy);
    fDevBounds45 = GrCCPathProcessor::MakeOffset45(devBounds45, -dx, -dy);
    fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
}

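// Points this entry at its permanent, cached atlas texture and adds the entry's pixel area to the
// atlas's bookkeeping, which invalidateAtlas() later uses to decide when the atlas has accumulated
// too many stale pixels.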
void GrCCPathCacheEntry::updateToCachedAtlas(const GrUniqueKey& atlasKey,
                                             const SkIVector& newAtlasOffset,
                                             sk_sp<GrCCAtlas::CachedAtlasInfo> info) {
    SkASSERT(SkGetThreadID() == fGraphicsThreadID);
    SkASSERT(atlasKey.isValid());
    SkASSERT(!fCurrFlushAtlas);  // Otherwise we should reuse the atlas from last time.

    fAtlasKey = atlasKey;
    fAtlasOffset = newAtlasOffset;

    SkASSERT(!fCachedAtlasInfo);  // Otherwise we need to invalidate our pixels in the old info.
    fCachedAtlasInfo = std::move(info);
    fCachedAtlasInfo->fNumPathPixels += this->height() * this->width();
}

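// Drops this entry's reference to its cached atlas and marks the entry's pixels as invalidated.
// Once at least half of the atlas's pixels have been invalidated, the atlas texture itself is
// purged from the GrContext's resource cache via the message bus.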
void GrCCPathCacheEntry::invalidateAtlas() {
    SkASSERT(SkGetThreadID() == fGraphicsThreadID);

    if (fCachedAtlasInfo) {
        // Mark our own pixels invalid in the cached atlas texture.
        fCachedAtlasInfo->fNumInvalidatedPathPixels += this->height() * this->width();
        if (!fCachedAtlasInfo->fIsPurgedFromResourceCache &&
            fCachedAtlasInfo->fNumInvalidatedPathPixels >= fCachedAtlasInfo->fNumPathPixels / 2) {
            // Too many invalidated pixels: purge the atlas texture from the resource cache.
            // The GrContext and CCPR path cache both share the same unique ID.
            SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                    GrUniqueKeyInvalidatedMessage(fAtlasKey, fCachedAtlasInfo->fContextUniqueID));
            fCachedAtlasInfo->fIsPurgedFromResourceCache = true;
        }
    }

    fAtlasKey.reset();
    fCachedAtlasInfo = nullptr;
}

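// Gen-ID change listener hook, registered on the path in GrCCPathCache::find(). The path's data
// has changed, so this entry's cached mask is stale; since eviction must happen on the graphics
// thread, we post ourselves to the owning cache's inbox and let purgeAsNeeded() evict us there.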
void GrCCPathCacheEntry::onChange() {
    // Post a thread-safe eviction message.
    SkMessageBus<sk_sp<GrCCPathCacheEntry>>::Post(sk_ref_sp(this));
}