/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrResourceAllocator_DEFINED
#define GrResourceAllocator_DEFINED

#include "include/private/SkTHash.h"

#include "src/gpu/GrHashMapWithCache.h"
#include "src/gpu/GrSurface.h"
#include "src/gpu/GrSurfaceProxy.h"

#include "src/core/SkArenaAlloc.h"
#include "src/core/SkTMultiMap.h"

class GrDirectContext;

// Print out explicit allocation information
#define GR_ALLOCATION_SPEW 0

// Print out information about interval creation
#define GR_TRACK_INTERVAL_CREATION 0
/*
 * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
 * being given the usage intervals of the various proxies. It keeps these intervals in a singly
 * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
 * to interval to detect proxy reuse.) When it comes time to allocate the resources it
 * traverses the sorted list and:
 *     removes intervals from the active list that have completed (returning their GrSurfaces
 *         to the free pool)
 *     allocates a new resource (preferably from the free pool) for the new interval
 *     adds the new interval to the active list (which is sorted by increasing end index)
 *
 * Note: the op indices (used in the usage intervals) come from the order of the ops in
 * their opsTasks after the opsTask DAG has been linearized.
 *
 *************************************************************************************************
 * How does instantiation failure handling work when explicitly allocating?
 *
 * In the gather-usage-intervals pass, all the GrSurfaceProxies used in the flush should be
 * gathered (i.e., in GrOpsTask::gatherProxyIntervals).
 *
 * The allocator will churn through this list but could fail anywhere.
 *
 * Allocation failure handling occurs at two levels:
 *
 * 1) If the GrSurface backing an opsTask fails to allocate then the entire opsTask is dropped.
 *
 * 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped
 *    (via GrOpsTask::purgeOpsWithUninstantiatedProxies).
 *
 * The pass to determine which ops to drop is a bit laborious, so we only check the opsTasks and
 * individual ops when something goes wrong in allocation (i.e., when the return code from
 * GrResourceAllocator::assign is bad).
 *
 * Altogether this means we should never attempt to draw an op which is missing some
 * required GrSurface.
 *
 * One wrinkle in this plan is that promise images are fulfilled during the gather-intervals pass.
 * If any of the promise images fail at this stage then the allocator is set into an error
 * state and all allocations are then scanned for failures during the main allocation pass.
 */
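/*
 * A minimal usage sketch (illustrative only; the exact driver code below is an assumption, but
 * per the comment above the intervals are gathered in GrOpsTask::gatherProxyIntervals at flush):
 *
 *     GrResourceAllocator alloc(dContext SkDEBUGCODE(, numOpsTasks));
 *     for (each op, in linearized opsTask order) {
 *         // one interval per proxy the op reads or writes
 *         alloc.addInterval(proxy, alloc.curOp(), alloc.curOp(),
 *                           GrResourceAllocator::ActualUse::kYes);
 *         alloc.incOps();
 *     }
 *     if (!alloc.planAssignment() || !alloc.assign()) {
 *         // Instantiation failed somewhere; drop the affected opsTasks/ops as described above.
 *     }
 */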
class GrResourceAllocator {
public:
    GrResourceAllocator(GrDirectContext* dContext SkDEBUGCODE(, int numOpsTasks))
            : fDContext(dContext) {}

    ~GrResourceAllocator();

    unsigned int curOp() const { return fNumOps; }
    void incOps() { fNumOps++; }

    /** Indicates whether a given call to addInterval represents an actual usage of the
     *  provided proxy. This is mainly here to accommodate deferred proxies attached to opsTasks.
     *  In that case we need to create an extra long interval for them (due to the upload) but
     *  don't want to count that usage/reference towards the proxy's recyclability.
     */
    enum class ActualUse : bool {
        kNo  = false,
        kYes = true
    };
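    // For example, a deferred proxy attached to an opsTask gets an interval stretched to cover
    // its upload, but that interval is added with ActualUse::kNo so the extra reference does
    // not count against the proxy's recyclability.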
    // Add a usage interval from 'start' to 'end' inclusive. This is usually used for
    // renderTargets. If an interval for the proxy already exists it will be expanded to
    // include the new range.
    void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse
                     SkDEBUGCODE(, bool isDirectDstRead = false));
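    // For example (hypothetical indices, following the contract above): addInterval(p, 2, 4,
    // ActualUse::kYes) followed by addInterval(p, 3, 7, ActualUse::kYes) leaves 'p' with the
    // single interval [2, 7].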
    // Generate an internal plan for resource allocation.
    // Fully-lazy proxies are also instantiated at this point so that their size can
    // be known accurately. Returns false if any lazy proxy failed to instantiate, true otherwise.
    bool planAssignment();

    // Instantiate and assign resources to all proxies.
    bool assign();

#if GR_ALLOCATION_SPEW
    void dumpIntervals();
#endif
private:
    class Interval;
    class Register;

    // Remove dead intervals from the active list
    void expire(unsigned int curIndex);

    // These two methods wrap the interactions with the free pool
    void recycleRegister(Register* r);
    Register* findOrCreateRegisterFor(GrSurfaceProxy* proxy);
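    // A sketch of the intended flow (design intent, not an API guarantee): when an interval
    // expires, recycleRegister() returns its register to fFreePool, keyed by scratch key; a
    // later interval whose proxy has a matching key can then be handed the same register (and,
    // ultimately, the same GrSurface) by findOrCreateRegisterFor().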
    struct FreePoolTraits {
        static const GrScratchKey& GetKey(const Register& r) {
            return r.scratchKey();
        }

        static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
        static void OnFree(Register* r) { }
    };
    typedef SkTMultiMap<Register, GrScratchKey, FreePoolTraits> FreePoolMultiMap;

    typedef SkTHashMap<uint32_t, Interval*, GrCheapHash> IntvlHash;

    struct UniqueKeyHash {
        uint32_t operator()(const GrUniqueKey& key) const { return key.hash(); }
    };
    typedef SkTHashMap<GrUniqueKey, Register*, UniqueKeyHash> UniqueKeyRegisterHash;
    // Each proxy (with some exceptions) is assigned a register. After all assignments are made,
    // another pass is performed to instantiate and assign actual surfaces to the proxies. Right
    // now these are performed in one call, but in the future they will be separable and the user
    // will be able to query re: memory cost before committing to surface creation.
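    // For example, if proxyA is live over ops [0, 3] and proxyB over ops [5, 9] and their
    // scratch keys match, both proxies can be assigned the same register and so end up backed
    // by the same GrSurface.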
    class Register {
    public:
        // It's OK to pass an invalid scratch key iff the proxy has a unique key.
        Register(GrSurfaceProxy* originatingProxy, GrScratchKey, GrResourceProvider*);

        const GrScratchKey& scratchKey() const { return fScratchKey; }
        const GrUniqueKey& uniqueKey() const { return fOriginatingProxy->getUniqueKey(); }

        GrSurface* existingSurface() const { return fExistingSurface.get(); }

        // Can this register be used by other proxies after this one?
        bool isRecyclable(const GrCaps&, GrSurfaceProxy* proxy, int knownUseCount) const;

        // Resolve the register allocation to an actual GrSurface. 'fOriginatingProxy'
        // is used to cache the allocation when a given register is used by multiple
        // proxies.
        bool instantiateSurface(GrSurfaceProxy*, GrResourceProvider*);

        SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; })

    private:
        GrSurfaceProxy*  fOriginatingProxy;
        GrScratchKey     fScratchKey;      // The free pool holds a reference to this.
        sk_sp<GrSurface> fExistingSurface; // Queried from the resource cache; may be null.

#ifdef SK_DEBUG
        uint32_t fUniqueID;

        static uint32_t CreateUniqueID();
#endif
    };
    class Interval {
    public:
        Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
                : fProxy(proxy)
                , fStart(start)
                , fEnd(end) {
            SkASSERT(proxy);
            SkDEBUGCODE(fUniqueID = CreateUniqueID());
#if GR_TRACK_INTERVAL_CREATION
            SkString proxyStr = proxy->dump();
            SkDebugf("New intvl %d: %s [%d, %d]\n", fUniqueID, proxyStr.c_str(), start, end);
#endif
        }

        const GrSurfaceProxy* proxy() const { return fProxy; }
        GrSurfaceProxy* proxy() { return fProxy; }

        unsigned int start() const { return fStart; }
        unsigned int end() const { return fEnd; }

        void setNext(Interval* next) { fNext = next; }
        const Interval* next() const { return fNext; }
        Interval* next() { return fNext; }

        Register* getRegister() const { return fRegister; }
        void setRegister(Register* r) { fRegister = r; }

        void addUse() { fUses++; }
        int uses() const { return fUses; }
        void extendEnd(unsigned int newEnd) {
            if (newEnd > fEnd) {
#if GR_TRACK_INTERVAL_CREATION
                // Log before updating fEnd so both the old and new endpoints are reported.
                SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd);
#endif
                fEnd = newEnd;
            }
        }
        SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; })

    private:
        GrSurfaceProxy* fProxy;
        unsigned int    fStart;
        unsigned int    fEnd;
        Interval*       fNext = nullptr;
        unsigned int    fUses = 0;
        Register*       fRegister = nullptr;

#ifdef SK_DEBUG
        uint32_t fUniqueID;

        static uint32_t CreateUniqueID();
#endif
    };
    class IntervalList {
    public:
        IntervalList() = default;
        ~IntervalList() {
            // The only time we delete an IntervalList is in the GrResourceAllocator dtor.
            // Since the arena allocator will clean up for us we don't bother here.
        }

        bool empty() const {
            SkASSERT(SkToBool(fHead) == SkToBool(fTail));
            return !SkToBool(fHead);
        }
        const Interval* peekHead() const { return fHead; }
        Interval* peekHead() { return fHead; }
        Interval* popHead();
        void insertByIncreasingStart(Interval*);
        void insertByIncreasingEnd(Interval*);

    private:
        SkDEBUGCODE(void validate() const;)

        Interval* fHead = nullptr;
        Interval* fTail = nullptr;
    };
    // Compositing use cases can create > 80 intervals.
    static const int kInitialArenaSize = 128 * sizeof(Interval);

    GrDirectContext*      fDContext;
    FreePoolMultiMap      fFreePool;          // Recently created/used GrSurfaces
    IntvlHash             fIntvlHash;         // All the intervals, hashed by proxyID

    IntervalList          fIntvlList;         // All the intervals sorted by increasing start
    IntervalList          fActiveIntvls;      // List of live intervals during assignment
                                              // (sorted by increasing end)
    IntervalList          fFinishedIntvls;    // All the completed intervals
                                              // (sorted by increasing start)
    UniqueKeyRegisterHash fUniqueKeyRegisters;
    unsigned int          fNumOps = 0;

    SkDEBUGCODE(bool fPlanned = false;)
    SkDEBUGCODE(bool fAssigned = false;)

    SkSTArenaAlloc<kInitialArenaSize> fInternalAllocator; // Intervals & registers live here.
    bool fFailedInstantiation = false;
};

#endif // GrResourceAllocator_DEFINED