/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrCCPathCache_DEFINED
#define GrCCPathCache_DEFINED

#include "GrShape.h"
#include "SkExchange.h"
#include "SkTHash.h"
#include "SkTInternalLList.h"
#include "ccpr/GrCCAtlas.h"
#include "ccpr/GrCCPathProcessor.h"

class GrCCPathCacheEntry;
class GrShape;

/**
 * This class implements an LRU cache that maps from GrShape to GrCCPathCacheEntry objects. Shapes
 * are only given one entry in the cache, so any time they are accessed with a different matrix, the
 * old entry gets evicted.
 */
class GrCCPathCache {
public:
    GrCCPathCache(uint32_t contextUniqueID);
    ~GrCCPathCache();

    class Key : public SkPathRef::GenIDChangeListener {
    public:
        static sk_sp<Key> Make(uint32_t pathCacheUniqueID, int dataCountU32,
                               const void* data = nullptr);

        uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; }

        int dataSizeInBytes() const { return fDataSizeInBytes; }
        const uint32_t* data() const;

        void resetDataCountU32(int dataCountU32) {
            SkASSERT(dataCountU32 <= fDataReserveCountU32);
            fDataSizeInBytes = dataCountU32 * sizeof(uint32_t);
        }
        uint32_t* data();

        bool operator==(const Key& that) const {
            return fDataSizeInBytes == that.fDataSizeInBytes &&
                   !memcmp(this->data(), that.data(), fDataSizeInBytes);
        }

        // Called when our corresponding path is modified or deleted. Not threadsafe.
        void onChange() override;

    private:
        Key(uint32_t pathCacheUniqueID, int dataCountU32)
                : fPathCacheUniqueID(pathCacheUniqueID)
                , fDataSizeInBytes(dataCountU32 * sizeof(uint32_t))
                SkDEBUGCODE(, fDataReserveCountU32(dataCountU32)) {
            SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
        }

        const uint32_t fPathCacheUniqueID;
        int fDataSizeInBytes;
        SkDEBUGCODE(const int fDataReserveCountU32);
        // The GrShape's unstyled key is stored as a variable-length footer to this class. GetKey
        // provides access to it.
    };
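
    // Illustrative sketch only (not part of this header): one plausible way a Key could be built
    // from a shape's unstyled key. GrShape::unstyledKeySize()/writeUnstyledKey() are assumed here,
    // and the real key layout in GrCCPathCache.cpp may store additional data beyond the shape key.
    //
    //     int shapeKeyCount = shape.unstyledKeySize();
    //     sk_sp<Key> key = Key::Make(pathCacheUniqueID, shapeKeyCount);
    //     shape.writeUnstyledKey(key->data());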

    // Stores the components of a transformation that affect a path mask (i.e. everything but
    // integer translation). During construction, any integer portions of the matrix's translate are
    // shaved off and returned to the caller. The caller is responsible for those integer shifts.
    struct MaskTransform {
        MaskTransform(const SkMatrix& m, SkIVector* shift);
        float fMatrix2x2[4];
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Except on AOSP, cache hits must have matching subpixel portions of their view matrix.
        // On AOSP we follow after HWUI and ignore the subpixel translate.
        float fSubpixelTranslate[2];
#endif
    };
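
    // A minimal sketch of the split described above (not the actual implementation; the rounding
    // convention shown is an assumption):
    //
    //     SkIVector shift;
    //     MaskTransform mt(viewMatrix, &shift);  // e.g. shift ~= {floor(tx), floor(ty)}
    //     // The caller re-applies 'shift' as an integer device-space offset when drawing the
    //     // cached mask; fSubpixelTranslate (when present) keeps what remains of the translate.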

    // Represents a ref on a GrCCPathCacheEntry that should only be used during the current flush.
    class OnFlushEntryRef : SkNoncopyable {
    public:
        static OnFlushEntryRef OnFlushRef(GrCCPathCacheEntry*);
        OnFlushEntryRef() = default;
        OnFlushEntryRef(OnFlushEntryRef&& ref) : fEntry(skstd::exchange(ref.fEntry, nullptr)) {}
        ~OnFlushEntryRef();

        GrCCPathCacheEntry* get() const { return fEntry; }
        GrCCPathCacheEntry* operator->() const { return fEntry; }
        GrCCPathCacheEntry& operator*() const { return *fEntry; }
        explicit operator bool() const { return fEntry; }
        void operator=(OnFlushEntryRef&& ref) { fEntry = skstd::exchange(ref.fEntry, nullptr); }

    private:
        OnFlushEntryRef(GrCCPathCacheEntry* entry) : fEntry(entry) {}
        GrCCPathCacheEntry* fEntry = nullptr;
    };

    enum class CreateIfAbsent : bool {
        kNo = false,
        kYes = true
    };

    // Finds an entry in the cache. Shapes are only given one entry, so any time they are accessed
    // with a different MaskTransform, the old entry gets evicted.
    OnFlushEntryRef find(GrOnFlushResourceProvider*, const GrShape&, const MaskTransform&,
                         CreateIfAbsent = CreateIfAbsent::kNo);
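
    // Hedged usage sketch (names like 'onFlushRP', 'shape', and 'viewMatrix' are hypothetical):
    // one lookup per draw, holding the returned ref only for the duration of the current flush.
    //
    //     SkIVector shift;
    //     MaskTransform mt(viewMatrix, &shift);
    //     if (OnFlushEntryRef entry = this->find(onFlushRP, shape, mt, CreateIfAbsent::kYes)) {
    //         // entry->hitCount() is 1 for a newly created entry; higher counts indicate reuse.
    //     }  // The ref is released here; it must not outlive the flush.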

    void doPreFlushProcessing();

    void purgeEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point& purgeTime);

    // As we evict entries from our local path cache, we accumulate a list of invalidated atlas
    // textures. This call purges the invalidated atlas textures from the mainline GrResourceCache.
    // It is available with two different "provider" objects, to accommodate whatever might be
    // available at the callsite.
    void purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider*);
    void purgeInvalidatedAtlasTextures(GrProxyProvider*);
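
    // A rough sketch of how these hooks might be sequenced around a flush. The exact call order
    // lives in the CCPR on-flush code, so treat this ordering as an assumption, not a contract:
    //
    //     cache.doPreFlushProcessing();
    //     cache.purgeEntriesOlderThan(proxyProvider, purgeTime);   // age-based eviction
    //     cache.purgeInvalidatedAtlasTextures(onFlushRP);          // drop textures freed by evictions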

private:
    // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
    // methods for SkTHash, and can only be moved. This guarantees the hash table holds exactly one
    // reference for each entry. Also, when a HashNode goes out of scope, that means it is exiting
    // the hash table. We take that opportunity to remove it from the LRU list and do some cleanup.
    class HashNode : SkNoncopyable {
    public:
        static const Key& GetKey(const HashNode&);
        inline static uint32_t Hash(const Key& key) {
            return GrResourceKeyHash(key.data(), key.dataSizeInBytes());
        }

        HashNode() = default;
        HashNode(GrCCPathCache*, sk_sp<Key>, const MaskTransform&, const GrShape&);
        HashNode(HashNode&& node)
                : fPathCache(node.fPathCache), fEntry(std::move(node.fEntry)) {
            SkASSERT(!node.fEntry);
        }

        ~HashNode();

        void operator=(HashNode&& node);

        GrCCPathCacheEntry* entry() const { return fEntry.get(); }

    private:
        GrCCPathCache* fPathCache = nullptr;
        sk_sp<GrCCPathCacheEntry> fEntry;
    };

    GrStdSteadyClock::time_point quickPerFlushTimestamp() {
        // time_point::min() means it's time to update fPerFlushTimestamp with a newer clock read.
        if (GrStdSteadyClock::time_point::min() == fPerFlushTimestamp) {
            fPerFlushTimestamp = GrStdSteadyClock::now();
        }
        return fPerFlushTimestamp;
    }

    void evict(const GrCCPathCache::Key&, GrCCPathCacheEntry* = nullptr);

    // Evicts all the cache entries whose keys have been queued up in fInvalidatedKeysInbox via
    // SkPath listeners.
    void evictInvalidatedCacheKeys();

    const uint32_t fContextUniqueID;

    SkTHashTable<HashNode, const Key&> fHashTable;
    SkTInternalLList<GrCCPathCacheEntry> fLRU;
    SkMessageBus<sk_sp<Key>>::Inbox fInvalidatedKeysInbox;
    sk_sp<Key> fScratchKey; // Reused for creating a temporary key in the find() method.

    // We only read the clock once per flush and cache it in this variable. This prevents excessive
    // clock reads for cache timestamps, which could degrade performance.
    GrStdSteadyClock::time_point fPerFlushTimestamp = GrStdSteadyClock::time_point::min();

    // As we evict entries from our local path cache, we accumulate lists of invalidated atlas
    // textures in these two members. We hold these until we purge them from the GrResourceCache
    // (e.g. via purgeInvalidatedAtlasTextures()).
    SkSTArray<4, sk_sp<GrTextureProxy>> fInvalidatedProxies;
    SkSTArray<4, GrUniqueKey> fInvalidatedProxyUniqueKeys;

    friend class GrCCCachedAtlas; // To append to fInvalidatedProxies, fInvalidatedProxyUniqueKeys.

public:
    const SkTHashTable<HashNode, const Key&>& testingOnly_getHashTable() const;
    const SkTInternalLList<GrCCPathCacheEntry>& testingOnly_getLRU() const;
};

/**
 * This class stores all the data necessary to draw a specific path + matrix combination from its
 * corresponding cached atlas.
 */
class GrCCPathCacheEntry : public GrNonAtomicRef<GrCCPathCacheEntry> {
public:
    SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCPathCacheEntry);

    ~GrCCPathCacheEntry() {
        SkASSERT(this->hasBeenEvicted());  // Should have called GrCCPathCache::evict().
        SkASSERT(!fCachedAtlas);
        SkASSERT(0 == fOnFlushRefCnt);
    }

    const GrCCPathCache::Key& cacheKey() const { SkASSERT(fCacheKey); return *fCacheKey; }

    // The number of times this specific entry (path + matrix combination) has been pulled from
    // the path cache. As long as the caller does exactly one lookup per draw, this translates to
    // the number of times the path has been drawn with a compatible matrix.
    //
    // If the entry did not previously exist and was created during
    // GrCCPathCache::find(.., CreateIfAbsent::kYes), its hit count will be 1.
    int hitCount() const { return fHitCount; }

    const GrCCCachedAtlas* cachedAtlas() const { return fCachedAtlas.get(); }

    const SkIRect& devIBounds() const { return fDevIBounds; }
    int width() const { return fDevIBounds.width(); }
    int height() const { return fDevIBounds.height(); }

    enum class ReleaseAtlasResult : bool {
        kNone,
        kDidInvalidateFromCache
    };

    // Called once our path has been rendered into the mainline CCPR (fp16, coverage count) atlas.
    // The caller will stash this atlas texture away after drawing, and during the next flush,
    // recover it and attempt to copy any paths that got reused into permanent 8-bit atlases.
    void setCoverageCountAtlas(GrOnFlushResourceProvider*, GrCCAtlas*, const SkIVector& atlasOffset,
                               const SkRect& devBounds, const SkRect& devBounds45,
                               const SkIRect& devIBounds, const SkIVector& maskShift);

    // Called once our path mask has been copied into a permanent, 8-bit atlas. This method points
    // the entry at the new atlas and updates the GrCCCachedAtlas data.
    ReleaseAtlasResult upgradeToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*,
                                                     GrCCAtlas*, const SkIVector& newAtlasOffset);
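
    // Illustrative two-stage flow implied by the comments above (a caller-side sketch; the names
    // 'onFlushRP', 'ccAtlas', 'literalAtlas', etc. are hypothetical):
    //
    //     // Flush N: the path is rendered into the fp16 coverage-count atlas.
    //     entry->setCoverageCountAtlas(onFlushRP, ccAtlas, atlasOffset, devBounds, devBounds45,
    //                                  devIBounds, maskShift);
    //     // A later flush: the path was reused, so its mask is copied into a permanent 8-bit atlas.
    //     entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, literalAtlas, newAtlasOffset);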

private:
    using MaskTransform = GrCCPathCache::MaskTransform;

    GrCCPathCacheEntry(sk_sp<GrCCPathCache::Key> cacheKey, const MaskTransform& maskTransform)
            : fCacheKey(std::move(cacheKey)), fMaskTransform(maskTransform) {
    }

    bool hasBeenEvicted() const { return fCacheKey->shouldUnregisterFromPath(); }

    // Resets this entry back to not having an atlas, and purges its previous atlas texture from the
    // resource cache if needed.
    ReleaseAtlasResult releaseCachedAtlas(GrCCPathCache*);

    sk_sp<GrCCPathCache::Key> fCacheKey;
    GrStdSteadyClock::time_point fTimestamp;
    int fHitCount = 0;

    sk_sp<GrCCCachedAtlas> fCachedAtlas;
    SkIVector fAtlasOffset;

    MaskTransform fMaskTransform;
    SkRect fDevBounds;
    SkRect fDevBounds45;
    SkIRect fDevIBounds;

    int fOnFlushRefCnt = 0;

    friend class GrCCPathCache;
    friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&,
                                                 GrColor, DoEvenOddFill);  // To access data.

public:
    int testingOnly_peekOnFlushRefCnt() const;
};

/**
 * Encapsulates the data for an atlas whose texture is stored in the mainline GrResourceCache. Many
 * instances of GrCCPathCacheEntry will reference the same GrCCCachedAtlas.
 *
 * We use this object to track the percentage of the original atlas pixels that could still ever
 * potentially be reused (i.e., those which still represent an extant path). When the percentage
 * of useful pixels drops below 50%, we purge the entire texture from the resource cache.
 *
 * This object also holds a ref on the atlas's actual texture proxy during flush. When
 * fOnFlushRefCnt decrements back down to zero, we release fOnFlushProxy and reset it back to null.
 */
class GrCCCachedAtlas : public GrNonAtomicRef<GrCCCachedAtlas> {
public:
    using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;

    GrCCCachedAtlas(GrCCAtlas::CoverageType type, const GrUniqueKey& textureKey,
                    sk_sp<GrTextureProxy> onFlushProxy)
            : fCoverageType(type)
            , fTextureKey(textureKey)
            , fOnFlushProxy(std::move(onFlushProxy)) {}

    ~GrCCCachedAtlas() {
        SkASSERT(!fOnFlushProxy);
        SkASSERT(!fOnFlushRefCnt);
    }

    GrCCAtlas::CoverageType coverageType() const { return fCoverageType; }
    const GrUniqueKey& textureKey() const { return fTextureKey; }

    GrTextureProxy* getOnFlushProxy() const { return fOnFlushProxy.get(); }

    void setOnFlushProxy(sk_sp<GrTextureProxy> proxy) {
        SkASSERT(!fOnFlushProxy);
        fOnFlushProxy = std::move(proxy);
    }

    void addPathPixels(int numPixels) { fNumPathPixels += numPixels; }
    ReleaseAtlasResult invalidatePathPixels(GrCCPathCache*, int numPixels);
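
    // The purge heuristic the class comment describes, as a hedged sketch (the real check lives in
    // invalidatePathPixels()'s implementation, not here): once the invalidated pixels outnumber the
    // still-useful ones, the whole texture gets purged from the resource cache.
    //
    //     if (fNumInvalidatedPathPixels > fNumPathPixels / 2) {
    //         // purge fTextureKey from the GrResourceCache
    //         return ReleaseAtlasResult::kDidInvalidateFromCache;
    //     }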

    int peekOnFlushRefCnt() const { return fOnFlushRefCnt; }
    void incrOnFlushRefCnt(int count = 1) const {
        SkASSERT(count > 0);
        SkASSERT(fOnFlushProxy);
        fOnFlushRefCnt += count;
    }
    void decrOnFlushRefCnt(int count = 1) const;
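
    // Per the class comment, once the on-flush ref count drops back to zero the proxy ref is
    // released. A hedged sketch of that invariant (the actual logic is in the .cpp):
    //
    //     fOnFlushRefCnt -= count;
    //     if (0 == fOnFlushRefCnt) {
    //         fOnFlushProxy = nullptr;
    //     }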

private:
    const GrCCAtlas::CoverageType fCoverageType;
    const GrUniqueKey fTextureKey;

    int fNumPathPixels = 0;
    int fNumInvalidatedPathPixels = 0;
    bool fIsInvalidatedFromResourceCache = false;

    mutable sk_sp<GrTextureProxy> fOnFlushProxy;
    mutable int fOnFlushRefCnt = 0;

public:
    int testingOnly_peekOnFlushRefCnt() const;
};


inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key,
                                         const MaskTransform& m, const GrShape& shape)
        : fPathCache(pathCache)
        , fEntry(new GrCCPathCacheEntry(key, m)) {
    SkASSERT(shape.hasUnstyledKey());
    shape.addGenIDChangeListener(std::move(key));
}

inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey(
        const GrCCPathCache::HashNode& node) {
    return *node.entry()->fCacheKey;
}

inline GrCCPathCache::HashNode::~HashNode() {
    SkASSERT(!fEntry || fEntry->hasBeenEvicted());  // Should have called GrCCPathCache::evict().
}

inline void GrCCPathCache::HashNode::operator=(HashNode&& node) {
    SkASSERT(!fEntry || fEntry->hasBeenEvicted());  // Should have called GrCCPathCache::evict().
    fEntry = skstd::exchange(node.fEntry, nullptr);
}

inline void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry& entry,
                                             const SkIVector& shift, GrColor color,
                                             DoEvenOddFill doEvenOddFill) {
    float dx = (float)shift.fX, dy = (float)shift.fY;
    this->set(entry.fDevBounds.makeOffset(dx, dy), MakeOffset45(entry.fDevBounds45, dx, dy),
              entry.fAtlasOffset - shift, color, doEvenOddFill);
}

#endif