/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCPathCache.h"

#include "include/private/SkNx.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrProxyProvider.h"

static constexpr int kMaxKeyDataCountU32 = 256;  // 1kB of uint32_t's.

DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);

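// Returns a unique ID for a new path cache, skipping over SK_InvalidUniqueID if the atomic
// counter ever wraps around.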
static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}

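// Filter for SkMessageBus: invalidated keys are only delivered to the GrCCPathCache whose unique
// ID matches the key's pathCacheUniqueID().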
static inline bool SkShouldPostMessageToBus(
        const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
    return key->pathCacheUniqueID() == msgBusUniqueID;
}

// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;

GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
        : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
    SkASSERT(!m.hasPerspective());
    Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
    Sk2f transFloor;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    // On Android framework we pre-round view matrix translates to integers for better caching.
    transFloor = translate;
#else
    transFloor = translate.floor();
    (translate - transFloor).store(fSubpixelTranslate);
#endif
    shift->set((int)transFloor[0], (int)transFloor[1]);
    SkASSERT((float)shift->fX == transFloor[0]);  // Make sure transFloor had integer values.
    SkASSERT((float)shift->fY == transFloor[1]);
}

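// Returns true if two mask transforms are close enough to share a cached mask: the 2x2 matrices
// must match exactly and, outside the Android framework, the subpixel translates must agree to
// within 1/256 of a pixel.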
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                const GrCCPathCache::MaskTransform& b) {
    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
        return false;
    }
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (((Sk2f::Load(a.fSubpixelTranslate) -
          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
        return false;
    }
#endif
    return true;
}

sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
                                                   int dataCountU32, const void* data) {
    void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
    sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
    if (data) {
        memcpy(key->data(), data, key->dataSizeInBytes());
    }
    return key;
}

void GrCCPathCache::Key::operator delete(void* p) { ::operator delete(p); }

const uint32_t* GrCCPathCache::Key::data() const {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
}

uint32_t* GrCCPathCache::Key::data() {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
}

void GrCCPathCache::Key::onChange() {
    // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
    SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
}

GrCCPathCache::GrCCPathCache(uint32_t contextUniqueID)
        : fContextUniqueID(contextUniqueID)
        , fInvalidatedKeysInbox(next_path_cache_id())
        , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
}

GrCCPathCache::~GrCCPathCache() {
    while (!fLRU.isEmpty()) {
        this->evict(*fLRU.tail()->fCacheKey, fLRU.tail());
    }
    SkASSERT(0 == fHashTable.count());  // Ensure the hash table and LRU list were coherent.

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    // We just purge via message bus since we don't have any access to the resource cache right now.
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(proxy->getUniqueKey(), fContextUniqueID));
    }
    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(key, fContextUniqueID));
    }
}

namespace {

// Produces a key that accounts for both a shape's path geometry and any stroke/style.
class WriteKeyHelper {
public:
    static constexpr int kStrokeWidthIdx = 0;
    static constexpr int kStrokeMiterIdx = 1;
    static constexpr int kStrokeCapJoinIdx = 2;
    static constexpr int kShapeUnstyledKeyIdx = 3;

    WriteKeyHelper(const GrShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}

    // Returns the total number of uint32_t's to allocate for the key.
    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }

    // Writes the key data to out[].
    void write(const GrShape& shape, uint32_t* out) {
        // Stroke key.
        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
        // http://skbug.com/8273
        SkASSERT(!shape.style().hasPathEffect());
        const SkStrokeRec& stroke = shape.style().strokeRec();
        if (stroke.isFillStyle()) {
            // Use a value for width that won't collide with a valid fp32 value >= 0.
            out[kStrokeWidthIdx] = ~0;
            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
        } else {
            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
            GR_STATIC_ASSERT(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
        }

        // Shape unstyled key.
        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
    }

private:
    int fShapeUnstyledKeyCount;
};

}  // namespace

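// Looks up (or creates) the cache entry for this shape under the given view matrix, moves it to
// the head of the LRU list, and returns an on-flush reference to it. Returns a null ref if the
// shape has no unstyled key or its key would exceed kMaxKeyDataCountU32.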
GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(
        GrOnFlushResourceProvider* onFlushRP, const GrShape& shape,
        const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, SkIVector* maskShift) {
    if (!shape.hasUnstyledKey()) {
        return OnFlushEntryRef();
    }

    WriteKeyHelper writeKeyHelper(shape);
    if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
        return OnFlushEntryRef();
    }

    SkASSERT(fScratchKey->unique());
    fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
    writeKeyHelper.write(shape, fScratchKey->data());

    MaskTransform m(viewMatrix, maskShift);
    GrCCPathCacheEntry* entry = nullptr;
    if (HashNode* node = fHashTable.find(*fScratchKey)) {
        entry = node->entry();
        SkASSERT(fLRU.isInList(entry));

        if (!fuzzy_equals(m, entry->fMaskTransform)) {
            // The path was reused with an incompatible matrix.
            if (entry->unique()) {
                // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
                SkASSERT(0 == entry->fOnFlushRefCnt);  // Because we are unique.
                entry->fMaskTransform = m;
                entry->fHitCount = 0;
                entry->fHitRect = SkIRect::MakeEmpty();
                entry->releaseCachedAtlas(this);
            } else {
                this->evict(*fScratchKey);
                entry = nullptr;
            }
        }
    }

    if (!entry) {
        if (fHashTable.count() >= kMaxCacheCount) {
            SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
            SkASSERT(node && node->entry() == fLRU.tail());
            this->evict(*fLRU.tail()->fCacheKey);  // We've exceeded our limit.
        }

        // Create a new entry in the cache.
        sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
                                            writeKeyHelper.allocCountU32(), fScratchKey->data());
        SkASSERT(*permanentKey == *fScratchKey);
        SkASSERT(!fHashTable.find(*permanentKey));
        entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();

        SkASSERT(fHashTable.count() <= kMaxCacheCount);
    } else {
        fLRU.remove(entry);  // Will be re-added at head.
    }

    SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
    SkASSERT(node && node->entry() == entry);
    fLRU.addToHead(entry);

    if (0 == entry->fOnFlushRefCnt) {
        // Only update the time stamp and hit count if we haven't seen this entry yet during the
        // current flush.
        entry->fTimestamp = this->quickPerFlushTimestamp();
        ++entry->fHitCount;

        if (entry->fCachedAtlas) {
            SkASSERT(SkToBool(entry->fCachedAtlas->peekOnFlushRefCnt()) ==
                     SkToBool(entry->fCachedAtlas->getOnFlushProxy()));
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                auto ct = GrCCAtlas::CoverageTypeToColorType(entry->fCachedAtlas->coverageType());
                if (sk_sp<GrTextureProxy> onFlushProxy = onFlushRP->findOrCreateProxyByUniqueKey(
                        entry->fCachedAtlas->textureKey(), ct, GrCCAtlas::kTextureOrigin)) {
                    onFlushProxy->priv().setIgnoredByResourceAllocator();
                    entry->fCachedAtlas->setOnFlushProxy(std::move(onFlushProxy));
                }
            }
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                // Our atlas's backing texture got purged from the GrResourceCache. Release the
                // cached atlas.
                entry->releaseCachedAtlas(this);
            }
        }
    }
    entry->fHitRect.join(clippedDrawBounds.makeOffset(-maskShift->x(), -maskShift->y()));
    SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
    return OnFlushEntryRef::OnFlushRef(entry);
}

void GrCCPathCache::evict(const GrCCPathCache::Key& key, GrCCPathCacheEntry* entry) {
    if (!entry) {
        HashNode* node = fHashTable.find(key);
        SkASSERT(node);
        entry = node->entry();
    }
    SkASSERT(*entry->fCacheKey == key);
    SkASSERT(!entry->hasBeenEvicted());
    entry->fCacheKey->markShouldUnregisterFromPath();  // Unregister the path listener.
    entry->releaseCachedAtlas(this);
    fLRU.remove(entry);
    fHashTable.remove(key);
}

void GrCCPathCache::doPreFlushProcessing() {
    this->evictInvalidatedCacheKeys();

    // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
    fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
}

void GrCCPathCache::purgeEntriesOlderThan(GrProxyProvider* proxyProvider,
                                          const GrStdSteadyClock::time_point& purgeTime) {
    this->evictInvalidatedCacheKeys();

#ifdef SK_DEBUG
    auto lastTimestamp = (fLRU.isEmpty())
            ? GrStdSteadyClock::time_point::max()
            : fLRU.tail()->fTimestamp;
#endif

    // Evict every entry from our local path cache whose timestamp is older than purgeTime.
    while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
#ifdef SK_DEBUG
        // Verify that fLRU is sorted by timestamp.
        auto timestamp = fLRU.tail()->fTimestamp;
        SkASSERT(timestamp >= lastTimestamp);
        lastTimestamp = timestamp;
#endif
        this->evict(*fLRU.tail()->fCacheKey);
    }

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    this->purgeInvalidatedAtlasTextures(proxyProvider);
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider* onFlushRP) {
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        onFlushRP->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        onFlushRP->processInvalidUniqueKey(key);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrProxyProvider* proxyProvider) {
    for (sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        proxyProvider->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        proxyProvider->processInvalidUniqueKey(key, nullptr,
                                               GrProxyProvider::InvalidateGPUResource::kYes);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

void GrCCPathCache::evictInvalidatedCacheKeys() {
    SkTArray<sk_sp<Key>> invalidatedKeys;
    fInvalidatedKeysInbox.poll(&invalidatedKeys);
    for (const sk_sp<Key>& key : invalidatedKeys) {
        bool isInCache = !key->shouldUnregisterFromPath();  // Gets set upon exiting the cache.
        if (isInCache) {
            this->evict(*key);
        }
    }
}

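// Takes both a regular ref and an on-flush ref on the entry. The on-flush ref also pins the
// entry's cached atlas (if any) so its proxy stays alive through the current flush.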
GrCCPathCache::OnFlushEntryRef
GrCCPathCache::OnFlushEntryRef::OnFlushRef(GrCCPathCacheEntry* entry) {
    entry->ref();
    ++entry->fOnFlushRefCnt;
    if (entry->fCachedAtlas) {
        entry->fCachedAtlas->incrOnFlushRefCnt();
    }
    return OnFlushEntryRef(entry);
}

GrCCPathCache::OnFlushEntryRef::~OnFlushEntryRef() {
    if (!fEntry) {
        return;
    }
    --fEntry->fOnFlushRefCnt;
    SkASSERT(fEntry->fOnFlushRefCnt >= 0);
    if (fEntry->fCachedAtlas) {
        fEntry->fCachedAtlas->decrOnFlushRefCnt();
    }
    fEntry->unref();
}

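// Records the coverage-count atlas that now contains this entry's mask. The atlas offset and
// bounds are stored with maskShift factored out, i.e., relative to the path's canonical
// (unshifted) position.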
void GrCCPathCacheEntry::setCoverageCountAtlas(
        GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas, const SkIVector& atlasOffset,
        const GrOctoBounds& octoBounds, const SkIRect& devIBounds, const SkIVector& maskShift) {
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(!fCachedAtlas);  // Otherwise we would need to call releaseCachedAtlas().

    if (this->hasBeenEvicted()) {
        // This entry will never be found in the path cache again. Don't bother trying to save an
        // atlas texture for it in the GrResourceCache.
        return;
    }

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(devIBounds.height() * devIBounds.width());

    fAtlasOffset = atlasOffset + maskShift;

    fOctoBounds.setOffset(octoBounds, -maskShift.fX, -maskShift.fY);
    fDevIBounds = devIBounds.makeOffset(-maskShift.fX, -maskShift.fY);
}

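// Switches this entry over to an atlas that stores literal (resolved) coverage instead of raw
// coverage counts: releases the previously cached atlas, takes a ref on the new one, and records
// the new atlas offset.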
GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::upgradeToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas,
        const SkIVector& newAtlasOffset) {
    SkASSERT(!this->hasBeenEvicted());
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(fCachedAtlas);
    SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage != fCachedAtlas->coverageType());

    ReleaseAtlasResult releaseAtlasResult = this->releaseCachedAtlas(pathCache);

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(this->height() * this->width());

    fAtlasOffset = newAtlasOffset;
    return releaseAtlasResult;
}

GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::releaseCachedAtlas(
        GrCCPathCache* pathCache) {
    ReleaseAtlasResult result = ReleaseAtlasResult::kNone;
    if (fCachedAtlas) {
        result = fCachedAtlas->invalidatePathPixels(pathCache, this->height() * this->width());
        if (fOnFlushRefCnt) {
            SkASSERT(fOnFlushRefCnt > 0);
            fCachedAtlas->decrOnFlushRefCnt(fOnFlushRefCnt);
        }
        fCachedAtlas = nullptr;
    }
    return result;
}

GrCCPathCacheEntry::ReleaseAtlasResult GrCCCachedAtlas::invalidatePathPixels(
        GrCCPathCache* pathCache, int numPixels) {
    // Mark the pixels invalid in the cached atlas texture.
    fNumInvalidatedPathPixels += numPixels;
    SkASSERT(fNumInvalidatedPathPixels <= fNumPathPixels);
    if (!fIsInvalidatedFromResourceCache && fNumInvalidatedPathPixels >= fNumPathPixels / 2) {
        // Too many invalidated pixels: purge the atlas texture from the resource cache.
        if (fOnFlushProxy) {
            // Don't clear (or std::move) fOnFlushProxy. Other path cache entries might still have
            // a reference on this atlas and expect to use our proxy during the current flush.
            // fOnFlushProxy will be cleared once fOnFlushRefCnt decrements to zero.
            pathCache->fInvalidatedProxies.push_back(fOnFlushProxy);
        } else {
            pathCache->fInvalidatedProxyUniqueKeys.push_back(fTextureKey);
        }
        fIsInvalidatedFromResourceCache = true;
        return ReleaseAtlasResult::kDidInvalidateFromCache;
    }
    return ReleaseAtlasResult::kNone;
}

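// Drops `count` on-flush references. Once the count reaches zero, the on-flush proxy is released
// so the atlas does not hold it past the end of the current flush.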
void GrCCCachedAtlas::decrOnFlushRefCnt(int count) const {
    SkASSERT(count > 0);
    fOnFlushRefCnt -= count;
    SkASSERT(fOnFlushRefCnt >= 0);
    if (0 == fOnFlushRefCnt) {
        // Don't hold the actual proxy past the end of the current flush.
        SkASSERT(fOnFlushProxy);
        fOnFlushProxy = nullptr;
    }
}