/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8#ifndef GrCCPathCache_DEFINED
9#define GrCCPathCache_DEFINED
10
11#include "SkExchange.h"
12#include "SkTHash.h"
13#include "SkTInternalLList.h"
14#include "ccpr/GrCCAtlas.h"
15#include "ccpr/GrCCPathProcessor.h"
16
17class GrCCPathCacheEntry;
18class GrShape;
19
/**
 * This class implements an LRU cache that maps from GrShape to GrCCPathCacheEntry objects. Shapes
 * are only given one entry in the cache, so any time they are accessed with a different matrix, the
 * old entry gets evicted.
 */
class GrCCPathCache {
public:
    GrCCPathCache();
    ~GrCCPathCache();

    // Hashable key for a cached path. Listens for gen-ID changes on the originating SkPathRef so
    // the cache can invalidate entries whose path data has been modified or deleted.
    class Key : public SkPathRef::GenIDChangeListener {
    public:
        // Allocates a Key with room for 'dataCountU32' uint32_t's of key data stored as a
        // variable-length footer. If 'data' is non-null, it is copied into that footer.
        static sk_sp<Key> Make(uint32_t pathCacheUniqueID, int dataCountU32,
                               const void* data = nullptr);

        // ID of the GrCCPathCache that created this key. Never SK_InvalidUniqueID.
        uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; }

        int dataSizeInBytes() const { return fDataSizeInBytes; }
        const uint32_t* data() const;

        // Shrinks (never grows) the logical size of the key data. Debug-only bookkeeping asserts
        // we stay within the footer space that was originally reserved by Make().
        void resetDataCountU32(int dataCountU32) {
            SkASSERT(dataCountU32 <= fDataReserveCountU32);
            fDataSizeInBytes = dataCountU32 * sizeof(uint32_t);
        }
        uint32_t* data();

        bool operator==(const Key&) const;

        // Called when our corresponding path is modified or deleted. Not threadsafe.
        void onChange() override;

    private:
        Key(uint32_t pathCacheUniqueID, int dataCountU32)
                : fPathCacheUniqueID(pathCacheUniqueID)
                , fDataSizeInBytes(dataCountU32 * sizeof(uint32_t))
                SkDEBUGCODE(, fDataReserveCountU32(dataCountU32)) {
            SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID);
        }

        const uint32_t fPathCacheUniqueID;
        int fDataSizeInBytes;
        SkDEBUGCODE(const int fDataReserveCountU32);
        // The GrShape's unstyled key is stored as a variable-length footer to this class. GetKey
        // provides access to it.
    };

    // Stores the components of a transformation that affect a path mask (i.e. everything but
    // integer translation). During construction, any integer portions of the matrix's translate are
    // shaved off and returned to the caller. The caller is responsible for those integer shifts.
    struct MaskTransform {
        MaskTransform(const SkMatrix& m, SkIVector* shift);
        float fMatrix2x2[4];
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Except on AOSP, cache hits must have matching subpixel portions of their view matrix.
        // On AOSP we follow after HWUI and ignore the subpixel translate.
        float fSubpixelTranslate[2];
#endif
    };

    // Strongly-typed bool: should find() create a new entry when the shape is not in the cache?
    enum class CreateIfAbsent : bool {
        kNo = false,
        kYes = true
    };

    // Finds an entry in the cache. Shapes are only given one entry, so any time they are accessed
    // with a different MaskTransform, the old entry gets evicted.
    sk_sp<GrCCPathCacheEntry> find(const GrShape&, const MaskTransform&,
                                   CreateIfAbsent = CreateIfAbsent::kNo);

    // Post-flush bookkeeping (defined in the .cpp). NOTE(review): presumably drains
    // fInvalidatedKeysInbox and resets fPerFlushTimestamp -- confirm against the implementation.
    void doPostFlushProcessing();
    void purgeEntriesOlderThan(const GrStdSteadyClock::time_point& purgeTime);

private:
    // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static
    // methods for SkTHash, and can only be moved. This guarantees the hash table holds exactly one
    // reference for each entry. Also, when a HashNode goes out of scope, that means it is exiting
    // the hash table. We take that opportunity to remove it from the LRU list and do some cleanup.
    class HashNode : SkNoncopyable {
    public:
        static const Key& GetKey(const HashNode&);
        static uint32_t Hash(const Key&);

        HashNode() = default;
        HashNode(GrCCPathCache*, sk_sp<Key>, const MaskTransform&, const GrShape&);
        // Move-only: steals the entry ref from 'node', leaving it empty (asserted post-move).
        HashNode(HashNode&& node)
                : fPathCache(node.fPathCache), fEntry(std::move(node.fEntry)) {
            SkASSERT(!node.fEntry);
        }

        ~HashNode();

        HashNode& operator=(HashNode&& node);

        GrCCPathCacheEntry* entry() const { return fEntry.get(); }

    private:
        void willExitHashTable();

        GrCCPathCache* fPathCache = nullptr;
        sk_sp<GrCCPathCacheEntry> fEntry;
    };

    // Returns the cached per-flush clock reading, taking a fresh reading on first use in a flush.
    GrStdSteadyClock::time_point quickPerFlushTimestamp() {
        // time_point::min() means it's time to update fPerFlushTimestamp with a newer clock read.
        if (GrStdSteadyClock::time_point::min() == fPerFlushTimestamp) {
            fPerFlushTimestamp = GrStdSteadyClock::now();
        }
        return fPerFlushTimestamp;
    }

    void evict(const GrCCPathCache::Key& key) {
        fHashTable.remove(key);  // HashNode::willExitHashTable() takes care of the rest.
    }

    void purgeInvalidatedKeys();

    SkTHashTable<HashNode, const GrCCPathCache::Key&> fHashTable;
    SkTInternalLList<GrCCPathCacheEntry> fLRU;
    SkMessageBus<sk_sp<Key>>::Inbox fInvalidatedKeysInbox;
    sk_sp<Key> fScratchKey;  // Reused for creating a temporary key in the find() method.

    // We only read the clock once per flush, and cache it in this variable. This prevents us from
    // excessive clock reads for cache timestamps that might degrade performance.
    GrStdSteadyClock::time_point fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
};
145
/**
 * This class stores all the data necessary to draw a specific path + matrix combination from their
 * corresponding cached atlas.
 */
class GrCCPathCacheEntry : public GrNonAtomicRef<GrCCPathCacheEntry> {
public:
    SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCPathCacheEntry);

    ~GrCCPathCacheEntry() {
        SkASSERT(!fCurrFlushAtlas);  // Client is required to reset fCurrFlushAtlas back to null.
        this->invalidateAtlas();
    }

    // The number of times this specific entry (path + matrix combination) has been pulled from
    // the path cache. As long as the caller does exactly one lookup per draw, this translates to
    // the number of times the path has been drawn with a compatible matrix.
    //
    // If the entry did not previously exist and was created during
    // GrCCPathCache::find(.., CreateIfAbsent::kYes), its hit count will be 1.
    int hitCount() const { return fHitCount; }

    // Does this entry reference a permanent, 8-bit atlas that resides in the resource cache?
    // (i.e. not a temporarily-stashed, fp16 coverage count atlas.)
    bool hasCachedAtlas() const { return SkToBool(fCachedAtlasInfo); }

    const SkIRect& devIBounds() const { return fDevIBounds; }
    int width() const { return fDevIBounds.width(); }
    int height() const { return fDevIBounds.height(); }

    // Called once our path has been rendered into the mainline CCPR (fp16, coverage count) atlas.
    // The caller will stash this atlas texture away after drawing, and during the next flush,
    // recover it and attempt to copy any paths that got reused into permanent 8-bit atlases.
    void initAsStashedAtlas(const GrUniqueKey& atlasKey, const SkIVector& atlasOffset,
                            const SkRect& devBounds, const SkRect& devBounds45,
                            const SkIRect& devIBounds, const SkIVector& maskShift);

    // Called once our path mask has been copied into a permanent, 8-bit atlas. This method points
    // the entry at the new atlas and updates the CachedAtlasInfo data.
    void updateToCachedAtlas(const GrUniqueKey& atlasKey, const SkIVector& newAtlasOffset,
                             sk_sp<GrCCAtlas::CachedAtlasInfo>);

    const GrUniqueKey& atlasKey() const { return fAtlasKey; }

    // Drops both the atlas key and the cached-atlas info, returning the entry to the
    // "no atlas" state (hasCachedAtlas() becomes false).
    void resetAtlasKeyAndInfo() {
        fAtlasKey.reset();
        fCachedAtlasInfo.reset();
    }

    // This is a utility for the caller to detect when a path gets drawn more than once during the
    // same flush, with compatible matrices. Before adding a path to an atlas, the caller may check
    // here to see if they have already placed the path previously during the same flush. The caller
    // is required to reset all currFlushAtlas references back to null before any subsequent flush.
    void setCurrFlushAtlas(const GrCCAtlas* currFlushAtlas) {
        // This should not get called more than once in a single flush. Once fCurrFlushAtlas is
        // non-null, it can only be set back to null (once the flush is over).
        SkASSERT(!fCurrFlushAtlas || !currFlushAtlas);
        fCurrFlushAtlas = currFlushAtlas;
    }
    const GrCCAtlas* currFlushAtlas() const { return fCurrFlushAtlas; }

private:
    using MaskTransform = GrCCPathCache::MaskTransform;

    // Entries are only constructed by the cache (see 'friend class GrCCPathCache' below).
    GrCCPathCacheEntry(sk_sp<GrCCPathCache::Key> cacheKey, const MaskTransform& maskTransform)
            : fCacheKey(std::move(cacheKey)), fMaskTransform(maskTransform) {
    }

    // Resets this entry back to not having an atlas, and purges its previous atlas texture from the
    // resource cache if needed.
    void invalidateAtlas();

    sk_sp<GrCCPathCache::Key> fCacheKey;

    // Timestamp of last access, used for LRU purging. NOTE(review): assignment site is in the
    // .cpp -- presumably set from GrCCPathCache::quickPerFlushTimestamp(); confirm there.
    GrStdSteadyClock::time_point fTimestamp;
    int fHitCount = 0;
    MaskTransform fMaskTransform;

    GrUniqueKey fAtlasKey;
    SkIVector fAtlasOffset;

    SkRect fDevBounds;
    SkRect fDevBounds45;
    SkIRect fDevIBounds;

    // If null, then we are referencing a "stashed" atlas (see initAsStashedAtlas()).
    sk_sp<GrCCAtlas::CachedAtlasInfo> fCachedAtlasInfo;

    // This field is for when a path gets drawn more than once during the same flush.
    const GrCCAtlas* fCurrFlushAtlas = nullptr;

    friend class GrCCPathCache;
    friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&,
                                                 GrColor, DoEvenOddFill);  // To access data.
};
240
Chris Dalton4da70192018-06-18 09:51:36 -0600241inline void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry& entry,
242 const SkIVector& shift, GrColor color,
243 DoEvenOddFill doEvenOddFill) {
244 float dx = (float)shift.fX, dy = (float)shift.fY;
245 this->set(entry.fDevBounds.makeOffset(dx, dy), MakeOffset45(entry.fDevBounds45, dx, dy),
246 entry.fAtlasOffset - shift, color, doEvenOddFill);
247}
248
249#endif