blob: 5dc8f424b9ce67e6f9b84d32ad416240d540864c [file] [log] [blame]
Robert Phillips26f3aeb2020-09-16 10:57:32 -04001/*
2 * Copyright 2020 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
Robert Phillipsd464feb2020-10-08 11:00:02 -04008#ifndef GrThreadSafeCache_DEFINED
9#define GrThreadSafeCache_DEFINED
Robert Phillips26f3aeb2020-09-16 10:57:32 -040010
Robert Phillips42a741a2020-10-23 12:27:47 -040011#include "include/core/SkRefCnt.h"
Robert Phillips26f3aeb2020-09-16 10:57:32 -040012#include "include/private/SkSpinlock.h"
Robert Phillipsf3e2b3c2020-09-18 14:07:43 -040013#include "src/core/SkArenaAlloc.h"
Robert Phillips752f7e12020-09-18 12:28:59 -040014#include "src/core/SkTDynamicHash.h"
Robert Phillips45593682020-09-18 16:16:33 -040015#include "src/core/SkTInternalLList.h"
Robert Phillips26f3aeb2020-09-16 10:57:32 -040016#include "src/gpu/GrSurfaceProxyView.h"
17
Robert Phillips42a741a2020-10-23 12:27:47 -040018class GrGpuBuffer;
19
Robert Phillips26f3aeb2020-09-16 10:57:32 -040020// Ganesh creates a lot of utility textures (e.g., blurred-rrect masks) that need to be shared
21// between the direct context and all the DDL recording contexts. This thread-safe cache
22// allows this sharing.
23//
24// In operation, each thread will first check if the threaded cache possesses the required texture.
25//
26// If a DDL thread doesn't find a needed texture it will go off and create it on the cpu and then
27// attempt to add it to the cache. If another thread had added it in the interim, the losing thread
28// will discard its work and use the texture the winning thread had created.
29//
30// If the thread in possession of the direct context doesn't find the needed texture it should
31// add a place holder view and then queue up the draw calls to complete it. In this way the
32// gpu-thread has precedence over the recording threads.
33//
34// The invariants for this cache differ a bit from those of the proxy and resource caches.
35// For this cache:
36//
37// only this cache knows the unique key - neither the proxy nor backing resource should
38// be discoverable in any other cache by the unique key
39// if a backing resource resides in the resource cache then there should be an entry in this
40// cache
41// an entry in this cache, however, doesn't guarantee that there is a corresponding entry in
42// the resource cache - although the entry here should be able to generate that entry
43// (i.e., be a lazy proxy)
Robert Phillipsc61c8952020-09-22 14:24:43 -040044//
45// Wrt interactions w/ GrContext/GrResourceCache purging, we have:
46//
47// Both GrContext::abandonContext and GrContext::releaseResourcesAndAbandonContext will cause
48// all the refs held in this cache to be dropped prior to clearing out the resource cache.
49//
50// For the size_t-variant of GrContext::purgeUnlockedResources, after an initial attempt
51// to purge the requested amount of resources fails, uniquely held resources in this cache
52// will be dropped in LRU to MRU order until the cache is under budget. Note that this
53// prioritizes the survival of resources in this cache over those just in the resource cache.
Robert Phillips331699c2020-09-22 15:20:01 -040054//
55// For the 'scratchResourcesOnly' variant of GrContext::purgeUnlockedResources, this cache
56// won't be modified in the scratch-only case unless the resource cache is over budget (in
57// which case it will purge uniquely-held resources in LRU to MRU order to get
58// back under budget). In the non-scratch-only case, all uniquely held resources in this cache
59// will be released prior to the resource cache being cleared out.
60//
61// For GrContext::setResourceCacheLimit, if an initial pass through the resource cache doesn't
62// reach the budget, uniquely held resources in this cache will be released in LRU to MRU order.
Robert Phillipsc2fe1642020-09-22 17:34:51 -040063//
64// For GrContext::performDeferredCleanup, any uniquely held resources that haven't been accessed
65// w/in 'msNotUsed' will be released from this cache prior to the resource cache being cleaned.
class GrThreadSafeCache {
public:
    GrThreadSafeCache();
    ~GrThreadSafeCache();

#if GR_TEST_UTILS
    // Testing-only introspection of the cache's current state.
    int numEntries() const SK_EXCLUDES(fSpinLock);

    size_t approxBytesUsedForHash() const SK_EXCLUDES(fSpinLock);
#endif

    // Drop all the refs held by this cache (per the class comment, this happens prior to
    // the resource cache being cleared out on context abandonment/release).
    void dropAllRefs() SK_EXCLUDES(fSpinLock);

    // Drop uniquely held refs until under the resource cache's budget.
    // A null parameter means drop all uniquely held refs.
    void dropUniqueRefs(GrResourceCache* resourceCache) SK_EXCLUDES(fSpinLock);

    // Drop uniquely held refs that were last accessed before 'purgeTime'
    void dropUniqueRefsOlderThan(GrStdSteadyClock::time_point purgeTime) SK_EXCLUDES(fSpinLock);

    // Debug-only query: does this cache currently have an entry for 'key'?
    SkDEBUGCODE(bool has(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);)

    // Look up the view cached under 'key'. The 'WithData' variant additionally returns the
    // unique key's custom data (see GrUniqueKey::refCustomData).
    GrSurfaceProxyView find(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findWithData(
            const GrUniqueKey&) SK_EXCLUDES(fSpinLock);

    // Add 'view' to the cache under 'key'. Per the class comment, if another thread has
    // already added an entry for 'key' the returned view is the winning thread's version,
    // not necessarily the one passed in.
    GrSurfaceProxyView add(const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> addWithData(
            const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);

    // Single-lock-acquisition combination of the find and add calls above.
    GrSurfaceProxyView findOrAdd(const GrUniqueKey&,
                                 const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findOrAddWithData(
            const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);

    // To hold vertex data in the cache and have it transparently transition from cpu-side to
    // gpu-side while being shared between all the threads we need a ref counted object that
    // keeps hold of the cpu-side data but allows deferred filling in of the mirroring gpu buffer.
    class VertexData : public SkNVRefCnt<VertexData> {
    public:
        ~VertexData();

        // The cpu-side data. Null when this object was created directly from a gpu buffer
        // (see the second ctor below).
        const void* vertices() const { return fVertices; }
        // Total byte size of the vertex data.
        size_t size() const { return fNumVertices * fVertexSize; }

        int numVertices() const { return fNumVertices; }
        size_t vertexSize() const { return fVertexSize; }

        // TODO: make these return const GrGpuBuffers?
        GrGpuBuffer* gpuBuffer() { return fGpuBuffer.get(); }
        sk_sp<GrGpuBuffer> refGpuBuffer() { return fGpuBuffer; }

        // Attach the gpu-side mirror of the cpu-side data. May only be done once.
        void setGpuBuffer(sk_sp<GrGpuBuffer> gpuBuffer) {
            // TODO: once we add the gpuBuffer we could free 'fVertices'. Deinstantiable
            // DDLs could throw a monkey wrench into that plan though.
            SkASSERT(!fGpuBuffer);
            fGpuBuffer = gpuBuffer;
        }

        // Free the cpu-side data and drop the ref on any gpu-side buffer.
        void reset() {
            sk_free(const_cast<void*>(fVertices));
            fVertices = nullptr;
            fNumVertices = 0;
            fVertexSize = 0;
            fGpuBuffer.reset();
        }

    private:
        friend class GrThreadSafeCache; // for access to ctor

        // Takes ownership of 'vertices' (freed with sk_free in reset()).
        VertexData(const void* vertices, int numVertices, size_t vertexSize)
                : fVertices(vertices)
                , fNumVertices(numVertices)
                , fVertexSize(vertexSize) {
        }

        // Gpu-only variant - no cpu-side copy is held.
        VertexData(sk_sp<GrGpuBuffer> gpuBuffer, int numVertices, size_t vertexSize)
                : fVertices(nullptr)
                , fNumVertices(numVertices)
                , fVertexSize(vertexSize)
                , fGpuBuffer(std::move(gpuBuffer)) {
        }

        const void* fVertices;
        int fNumVertices;
        size_t fVertexSize;

        sk_sp<GrGpuBuffer> fGpuBuffer;
    };

    // The returned VertexData object takes ownership of 'vertices' which had better have been
    // allocated with malloc!
    static sk_sp<VertexData> MakeVertexData(const void* vertices,
                                            int vertexCount,
                                            size_t vertexSize);
    static sk_sp<VertexData> MakeVertexData(sk_sp<GrGpuBuffer> buffer,
                                            int vertexCount,
                                            size_t vertexSize);

    // Vertex-data analogs of findWithData/addWithData above.
    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> findVertsWithData(
            const GrUniqueKey&) SK_EXCLUDES(fSpinLock);

    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> addVertsWithData(
            const GrUniqueKey&,
            sk_sp<VertexData>) SK_EXCLUDES(fSpinLock);

    // Remove the entry associated with 'key' from this cache.
    void remove(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);

    // To allow gpu-created resources to have priority, we pre-emptively place a lazy proxy
    // in the thread-safe cache (with findOrAdd). The Trampoline object allows that lazy proxy to
    // be instantiated with some later generated rendering result.
    class Trampoline : public SkRefCnt {
    public:
        sk_sp<GrTextureProxy> fProxy;
    };

    static std::tuple<GrSurfaceProxyView, sk_sp<Trampoline>> CreateLazyView(GrDirectContext*,
                                                                            GrColorType,
                                                                            SkISize dimensions,
                                                                            GrSurfaceOrigin,
                                                                            SkBackingFit);
private:
    // An Entry pairs a unique key with either a view or a VertexData ref - a tagged union
    // discriminated by 'fTag'. Entries live in both the hash map (lookup) and the internal
    // llist (LRU ordering) below.
    struct Entry {
        Entry(const GrUniqueKey& key, const GrSurfaceProxyView& view)
                : fKey(key)
                , fView(view)
                , fTag(Entry::kView) {
        }

        Entry(const GrUniqueKey& key, sk_sp<VertexData> vertData)
                : fKey(key)
                , fVertData(std::move(vertData))
                , fTag(Entry::kVertData) {
        }

        ~Entry() {
            this->makeEmpty();
        }

        // True if this cache holds the only ref to the entry's payload (proxy or vert data).
        bool uniquelyHeld() const {
            SkASSERT(fTag != kEmpty);

            if (fTag == kView && fView.proxy()->unique()) {
                return true;
            } else if (fTag == kVertData && fVertData->unique()) {
                return true;
            }

            return false;
        }

        const GrUniqueKey& key() const {
            SkASSERT(fTag != kEmpty);
            return fKey;
        }

        // Borrowed (non-ref'ing) access to the key's custom data.
        SkData* getCustomData() const {
            SkASSERT(fTag != kEmpty);
            return fKey.getCustomData();
        }

        sk_sp<SkData> refCustomData() const {
            SkASSERT(fTag != kEmpty);
            return fKey.refCustomData();
        }

        GrSurfaceProxyView view() {
            SkASSERT(fTag == kView);
            return fView;
        }

        sk_sp<VertexData> vertexData() {
            SkASSERT(fTag == kVertData);
            return fVertData;
        }

        // Re-purpose an empty (recycled) entry to hold a view.
        void set(const GrUniqueKey& key, const GrSurfaceProxyView& view) {
            SkASSERT(fTag == kEmpty);
            fKey = key;
            fView = view;
            fTag = kView;
        }

        // Release whichever payload is active and return the entry to the empty state.
        void makeEmpty() {
            fKey.reset();
            if (fTag == kView) {
                fView.reset();
            } else if (fTag == kVertData) {
                fVertData.reset();
            }
            fTag = kEmpty;
        }

        // Re-purpose an empty (recycled) entry - or update an existing one - to hold vert data.
        void set(const GrUniqueKey& key, sk_sp<VertexData> vertData) {
            SkASSERT(fTag == kEmpty || fTag == kVertData);
            fKey = key;
            fVertData = vertData;
            fTag = kVertData;
        }

        // The thread-safe cache gets to directly manipulate the llist and last-access members
        GrStdSteadyClock::time_point fLastAccess;
        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Entry);

        // for SkTDynamicHash
        static const GrUniqueKey& GetKey(const Entry& e) {
            SkASSERT(e.fTag != kEmpty);
            return e.fKey;
        }
        static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }

    private:
        // Note: the unique key is stored here because it is never attached to a proxy or a
        // GrTexture
        GrUniqueKey fKey;
        union {
            GrSurfaceProxyView fView;
            sk_sp<VertexData> fVertData;
        };

        // Discriminant for the union above - which payload (if any) is active.
        enum {
            kEmpty,
            kView,
            kVertData,
        } fTag { kEmpty };
    };

    // MRU bookkeeping on fUniquelyKeyedEntryList (its head is the MRU).
    void makeExistingEntryMRU(Entry*) SK_REQUIRES(fSpinLock);
    Entry* makeNewEntryMRU(Entry*) SK_REQUIRES(fSpinLock);

    // Acquire an Entry (recycled from fFreeEntryList or newly allocated) for the payload.
    Entry* getEntry(const GrUniqueKey&, const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
    Entry* getEntry(const GrUniqueKey&, sk_sp<VertexData>) SK_REQUIRES(fSpinLock);

    // Return an emptied entry to fFreeEntryList for reuse.
    void recycleEntry(Entry*) SK_REQUIRES(fSpinLock);

    // Lock-held implementations backing the public find/add variants above.
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalFind(
            const GrUniqueKey&) SK_REQUIRES(fSpinLock);
    std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalAdd(
            const GrUniqueKey&,
            const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);

    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> internalFindVerts(
            const GrUniqueKey&) SK_REQUIRES(fSpinLock);
    std::tuple<sk_sp<VertexData>, sk_sp<SkData>> internalAddVerts(
            const GrUniqueKey&,
            sk_sp<VertexData>) SK_REQUIRES(fSpinLock);

    mutable SkSpinlock fSpinLock;

    SkTDynamicHash<Entry, GrUniqueKey> fUniquelyKeyedEntryMap SK_GUARDED_BY(fSpinLock);
    // The head of this list is the MRU
    SkTInternalLList<Entry> fUniquelyKeyedEntryList SK_GUARDED_BY(fSpinLock);

    // TODO: empirically determine this from the skps
    static const int kInitialArenaSize = 64 * sizeof(Entry);

    // Entries are arena-allocated from fStorage and recycled through fFreeEntryList.
    char fStorage[kInitialArenaSize];
    SkArenaAlloc fEntryAllocator{fStorage, kInitialArenaSize, kInitialArenaSize};
    Entry* fFreeEntryList SK_GUARDED_BY(fSpinLock);
};
325
Robert Phillipsd464feb2020-10-08 11:00:02 -0400326#endif // GrThreadSafeCache_DEFINED