/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCPathCache.h"

#include "include/private/SkNx.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrProxyProvider.h"

static constexpr int kMaxKeyDataCountU32 = 256;  // 1kB of uint32_t's.

DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);

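// Each GrCCPathCache instance is assigned its own non-zero ID by next_path_cache_id(). Keys carry
// this ID so that, when a path is invalidated, the message bus routes the eviction message back to
// the cache that created the key (see SkShouldPostMessageToBus below).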
static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}

static inline bool SkShouldPostMessageToBus(
        const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
    return key->pathCacheUniqueID() == msgBusUniqueID;
}

// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;

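// MaskTransform records the 2x2 portion of the view matrix a mask was rendered with, plus (outside
// the Android framework) the subpixel portion of its translation. The integer portion of the
// translation is returned via 'shift' so a cached mask can be reused by simply offsetting it.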
GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
        : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
    SkASSERT(!m.hasPerspective());
    Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
    Sk2f transFloor;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    // On Android framework we pre-round view matrix translates to integers for better caching.
    transFloor = translate;
#else
    transFloor = translate.floor();
    (translate - transFloor).store(fSubpixelTranslate);
#endif
    shift->set((int)transFloor[0], (int)transFloor[1]);
    SkASSERT((float)shift->fX == transFloor[0]);  // Make sure transFloor had integer values.
    SkASSERT((float)shift->fY == transFloor[1]);
}

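// Returns true if the two transforms are close enough that a mask rendered with 'a' can be reused
// for 'b': the 2x2 matrices must match exactly and, outside the Android framework, the subpixel
// translations must differ by no more than 1/256 of a pixel.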
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                const GrCCPathCache::MaskTransform& b) {
    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
        return false;
    }
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (((Sk2f::Load(a.fSubpixelTranslate) -
          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
        return false;
    }
#endif
    return true;
}

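// Keys are allocated with their uint32_t data array appended directly after the Key object, so one
// over-sized allocation (via placement new) holds both the header and the variable-length key data.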
sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
                                                   int dataCountU32, const void* data) {
    void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
    sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
    if (data) {
        memcpy(key->data(), data, key->dataSizeInBytes());
    }
    return key;
}

void GrCCPathCache::Key::operator delete(void* p) { ::operator delete(p); }

const uint32_t* GrCCPathCache::Key::data() const {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
}

uint32_t* GrCCPathCache::Key::data() {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
}

void GrCCPathCache::Key::onChange() {
    // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
    SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
}

GrCCPathCache::GrCCPathCache(uint32_t contextUniqueID)
        : fContextUniqueID(contextUniqueID)
        , fInvalidatedKeysInbox(next_path_cache_id())
        , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
}

GrCCPathCache::~GrCCPathCache() {
    while (!fLRU.isEmpty()) {
        this->evict(*fLRU.tail()->fCacheKey, fLRU.tail());
    }
    SkASSERT(0 == fHashTable.count());  // Ensure the hash table and LRU list were coherent.

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    // We just purge via message bus since we don't have any access to the resource cache right now.
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(proxy->getUniqueKey(), fContextUniqueID));
    }
    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(key, fContextUniqueID));
    }
}

namespace {

// Produces a key that accounts both for a shape's path geometry, as well as any stroke/style.
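// Key layout: [0] = stroke width (~0 for fills), [1] = miter limit, [2] = (cap << 16) | join,
// [3..] = the shape's unstyled key.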
class WriteKeyHelper {
public:
    static constexpr int kStrokeWidthIdx = 0;
    static constexpr int kStrokeMiterIdx = 1;
    static constexpr int kStrokeCapJoinIdx = 2;
    static constexpr int kShapeUnstyledKeyIdx = 3;

    WriteKeyHelper(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}

    // Returns the total number of uint32_t's to allocate for the key.
    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }

    // Writes the key data to out[].
    void write(const GrShape& shape, uint32_t* out) {
        // Stroke key.
        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
        // http://skbug.com/8273
        SkASSERT(!shape.style().hasPathEffect());
        const SkStrokeRec& stroke = shape.style().strokeRec();
        if (stroke.isFillStyle()) {
            // Use a value for width that won't collide with a valid fp32 value >= 0.
            out[kStrokeWidthIdx] = ~0;
            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
        } else {
            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
        }

        // Shape unstyled key.
        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
    }

private:
    int fShapeUnstyledKeyCount;
};

}  // namespace

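// Looks up (or creates) the cache entry for 'shape'. The shape's styled key is written into the
// reusable fScratchKey for the hash lookup; a permanent copy of the key is only allocated when a
// new entry is inserted. On a hit with an incompatible mask transform, the entry is either recycled
// (if nothing else references it) or evicted. The entry is moved to the head of the LRU list, and
// the returned OnFlushEntryRef takes a ref on it (and on its cached atlas, if any).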
GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(
        GrOnFlushResourceProvider* onFlushRP, const GrShape& shape,
        const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, SkIVector* maskShift) {
    if (!shape.hasUnstyledKey()) {
        return OnFlushEntryRef();
    }

    WriteKeyHelper writeKeyHelper(shape);
    if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
        return OnFlushEntryRef();
    }

    SkASSERT(fScratchKey->unique());
    fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
    writeKeyHelper.write(shape, fScratchKey->data());

    MaskTransform m(viewMatrix, maskShift);
    GrCCPathCacheEntry* entry = nullptr;
    if (HashNode* node = fHashTable.find(*fScratchKey)) {
        entry = node->entry();
        SkASSERT(fLRU.isInList(entry));

        if (!fuzzy_equals(m, entry->fMaskTransform)) {
            // The path was reused with an incompatible matrix.
            if (entry->unique()) {
                // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
                SkASSERT(0 == entry->fOnFlushRefCnt);  // Because we are unique.
                entry->fMaskTransform = m;
                entry->fHitCount = 0;
                entry->fHitRect = SkIRect::MakeEmpty();
                entry->releaseCachedAtlas(this);
            } else {
                this->evict(*fScratchKey);
                entry = nullptr;
            }
        }
    }

    if (!entry) {
        if (fHashTable.count() >= kMaxCacheCount) {
            SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
            SkASSERT(node && node->entry() == fLRU.tail());
            this->evict(*fLRU.tail()->fCacheKey);  // We've exceeded our limit.
        }

        // Create a new entry in the cache.
        sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
                                            writeKeyHelper.allocCountU32(), fScratchKey->data());
        SkASSERT(*permanentKey == *fScratchKey);
        SkASSERT(!fHashTable.find(*permanentKey));
        entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();

        SkASSERT(fHashTable.count() <= kMaxCacheCount);
    } else {
        fLRU.remove(entry);  // Will be re-added at head.
    }

    SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
    SkASSERT(node && node->entry() == entry);
    fLRU.addToHead(entry);

    if (0 == entry->fOnFlushRefCnt) {
        // Only update the time stamp and hit count if we haven't seen this entry yet during the
        // current flush.
        entry->fTimestamp = this->quickPerFlushTimestamp();
        ++entry->fHitCount;

        if (entry->fCachedAtlas) {
            SkASSERT(SkToBool(entry->fCachedAtlas->peekOnFlushRefCnt()) ==
                     SkToBool(entry->fCachedAtlas->getOnFlushProxy()));
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                auto ct = GrCCAtlas::CoverageTypeToColorType(entry->fCachedAtlas->coverageType());
                if (sk_sp<GrTextureProxy> onFlushProxy = onFlushRP->findOrCreateProxyByUniqueKey(
                        entry->fCachedAtlas->textureKey(), ct, GrCCAtlas::kTextureOrigin,
                        GrSurfaceProxy::UseAllocator::kNo)) {
                    entry->fCachedAtlas->setOnFlushProxy(std::move(onFlushProxy));
                }
            }
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                // Our atlas's backing texture got purged from the GrResourceCache. Release the
                // cached atlas.
                entry->releaseCachedAtlas(this);
            }
        }
    }
    entry->fHitRect.join(clippedDrawBounds.makeOffset(-maskShift->x(), -maskShift->y()));
    SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
    return OnFlushEntryRef::OnFlushRef(entry);
}

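// Removes the entry from both the hash table and the LRU list, unregisters its path listener, and
// releases any cached atlas it still references. 'entry' may be passed in when the caller already
// has it; otherwise it is looked up from 'key'.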
void GrCCPathCache::evict(const GrCCPathCache::Key& key, GrCCPathCacheEntry* entry) {
    if (!entry) {
        HashNode* node = fHashTable.find(key);
        SkASSERT(node);
        entry = node->entry();
    }
    SkASSERT(*entry->fCacheKey == key);
    SkASSERT(!entry->hasBeenEvicted());
    entry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
    entry->releaseCachedAtlas(this);
    fLRU.remove(entry);
    fHashTable.remove(key);
}

void GrCCPathCache::doPreFlushProcessing() {
    this->evictInvalidatedCacheKeys();

    // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
    fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
}

void GrCCPathCache::purgeEntriesOlderThan(GrProxyProvider* proxyProvider,
                                          const GrStdSteadyClock::time_point& purgeTime) {
    this->evictInvalidatedCacheKeys();

#ifdef SK_DEBUG
    auto lastTimestamp = (fLRU.isEmpty())
            ? GrStdSteadyClock::time_point::max()
            : fLRU.tail()->fTimestamp;
#endif

    // Evict every entry from our local path cache whose timestamp is older than purgeTime.
    while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
#ifdef SK_DEBUG
        // Verify that fLRU is sorted by timestamp.
        auto timestamp = fLRU.tail()->fTimestamp;
        SkASSERT(timestamp >= lastTimestamp);
        lastTimestamp = timestamp;
#endif
        this->evict(*fLRU.tail()->fCacheKey);
    }

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    this->purgeInvalidatedAtlasTextures(proxyProvider);
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider* onFlushRP) {
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        onFlushRP->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        onFlushRP->processInvalidUniqueKey(key);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrProxyProvider* proxyProvider) {
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        proxyProvider->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        proxyProvider->processInvalidUniqueKey(key, nullptr,
                                               GrProxyProvider::InvalidateGPUResource::kYes);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

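// Drains the inbox of keys whose paths have been invalidated and evicts any of them that are still
// registered in the cache.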
void GrCCPathCache::evictInvalidatedCacheKeys() {
    SkTArray<sk_sp<Key>> invalidatedKeys;
    fInvalidatedKeysInbox.poll(&invalidatedKeys);
    for (const sk_sp<Key>& key : invalidatedKeys) {
        bool isInCache = !key->shouldUnregisterFromPath();  // Gets set upon exiting the cache.
        if (isInCache) {
            this->evict(*key);
        }
    }
}

GrCCPathCache::OnFlushEntryRef
GrCCPathCache::OnFlushEntryRef::OnFlushRef(GrCCPathCacheEntry* entry) {
    entry->ref();
    ++entry->fOnFlushRefCnt;
    if (entry->fCachedAtlas) {
        entry->fCachedAtlas->incrOnFlushRefCnt();
    }
    return OnFlushEntryRef(entry);
}

GrCCPathCache::OnFlushEntryRef::~OnFlushEntryRef() {
    if (!fEntry) {
        return;
    }
    --fEntry->fOnFlushRefCnt;
    SkASSERT(fEntry->fOnFlushRefCnt >= 0);
    if (fEntry->fCachedAtlas) {
        fEntry->fCachedAtlas->decrOnFlushRefCnt();
    }
    fEntry->unref();
}

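// Associates the entry with the coverage-count atlas that holds its mask, and stores the mask
// geometry in the entry's local space (i.e. translated by -maskShift).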
void GrCCPathCacheEntry::setCoverageCountAtlas(
        GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas, const SkIVector& atlasOffset,
        const GrOctoBounds& octoBounds, const SkIRect& devIBounds, const SkIVector& maskShift) {
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(!fCachedAtlas);  // Otherwise we would need to call releaseCachedAtlas().

    if (this->hasBeenEvicted()) {
        // This entry will never be found in the path cache again. Don't bother trying to save an
        // atlas texture for it in the GrResourceCache.
        return;
    }

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(devIBounds.height() * devIBounds.width());

    fAtlasOffset = atlasOffset + maskShift;

    fOctoBounds.setOffset(octoBounds, -maskShift.fX, -maskShift.fY);
    fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
}

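// Moves the entry from its current coverage-count atlas to a literal-coverage atlas at
// newAtlasOffset, releasing the old cached atlas first and returning the result of that release.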
GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::upgradeToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas,
        const SkIVector& newAtlasOffset) {
    SkASSERT(!this->hasBeenEvicted());
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(fCachedAtlas);
    SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage != fCachedAtlas->coverageType());

    ReleaseAtlasResult releaseAtlasResult = this->releaseCachedAtlas(pathCache);

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(this->height() * this->width());

    fAtlasOffset = newAtlasOffset;
    return releaseAtlasResult;
}

GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::releaseCachedAtlas(
        GrCCPathCache* pathCache) {
    ReleaseAtlasResult result = ReleaseAtlasResult::kNone;
    if (fCachedAtlas) {
        result = fCachedAtlas->invalidatePathPixels(pathCache, this->height() * this->width());
        if (fOnFlushRefCnt) {
            SkASSERT(fOnFlushRefCnt > 0);
            fCachedAtlas->decrOnFlushRefCnt(fOnFlushRefCnt);
        }
        fCachedAtlas = nullptr;
    }
    return result;
}

GrCCPathCacheEntry::ReleaseAtlasResult GrCCCachedAtlas::invalidatePathPixels(
        GrCCPathCache* pathCache, int numPixels) {
    // Mark the pixels invalid in the cached atlas texture.
    fNumInvalidatedPathPixels += numPixels;
    SkASSERT(fNumInvalidatedPathPixels <= fNumPathPixels);
    if (!fIsInvalidatedFromResourceCache && fNumInvalidatedPathPixels >= fNumPathPixels / 2) {
        // Too many invalidated pixels: purge the atlas texture from the resource cache.
        if (fOnFlushProxy) {
            // Don't clear (or std::move) fOnFlushProxy. Other path cache entries might still have a
            // reference on this atlas and expect to use our proxy during the current flush.
            // fOnFlushProxy will be cleared once fOnFlushRefCnt decrements to zero.
            pathCache->fInvalidatedProxies.push_back(fOnFlushProxy);
        } else {
            pathCache->fInvalidatedProxyUniqueKeys.push_back(fTextureKey);
        }
        fIsInvalidatedFromResourceCache = true;
        return ReleaseAtlasResult::kDidInvalidateFromCache;
    }
    return ReleaseAtlasResult::kNone;
}

void GrCCCachedAtlas::decrOnFlushRefCnt(int count) const {
    SkASSERT(count > 0);
    fOnFlushRefCnt -= count;
    SkASSERT(fOnFlushRefCnt >= 0);
    if (0 == fOnFlushRefCnt) {
        // Don't hold the actual proxy past the end of the current flush.
        SkASSERT(fOnFlushProxy);
        fOnFlushProxy = nullptr;
    }
}