/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
| 7 | |
| 8 | #ifndef GrResourceAllocator_DEFINED |
| 9 | #define GrResourceAllocator_DEFINED |
| 10 | |
Adlai Holler | 539db2f | 2021-03-16 09:45:05 -0400 | [diff] [blame] | 11 | #include "include/private/SkTHash.h" |
| 12 | |
Adlai Holler | 539db2f | 2021-03-16 09:45:05 -0400 | [diff] [blame] | 13 | #include "src/gpu/GrHashMapWithCache.h" |
Greg Daniel | 456f9b5 | 2020-03-05 19:14:18 +0000 | [diff] [blame] | 14 | #include "src/gpu/GrSurface.h" |
Adlai Holler | ca1137b | 2021-04-08 11:39:55 -0400 | [diff] [blame] | 15 | #include "src/gpu/GrSurfaceProxy.h" |
Robert Phillips | 8186cbe | 2017-11-01 17:32:39 -0400 | [diff] [blame] | 16 | |
Ben Wagner | 729a23f | 2019-05-17 16:29:34 -0400 | [diff] [blame] | 17 | #include "src/core/SkArenaAlloc.h" |
Mike Klein | c0bd9f9 | 2019-04-23 12:05:21 -0500 | [diff] [blame] | 18 | #include "src/core/SkTMultiMap.h" |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 19 | |
Adlai Holler | ca1137b | 2021-04-08 11:39:55 -0400 | [diff] [blame] | 20 | class GrDirectContext; |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 21 | |
Robert Phillips | 715d08c | 2018-07-18 13:56:48 -0400 | [diff] [blame] | 22 | // Print out explicit allocation information |
| 23 | #define GR_ALLOCATION_SPEW 0 |
| 24 | |
Robert Phillips | da1be46 | 2018-07-27 07:18:06 -0400 | [diff] [blame] | 25 | // Print out information about interval creation |
| 26 | #define GR_TRACK_INTERVAL_CREATION 0 |
| 27 | |
/*
 * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
 * being given the usage intervals of the various proxies. It keeps these intervals in a singly
 * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
 * to interval to find proxy reuse). When it comes time to allocate the resources it
 * traverses the sorted list and:
 *     removes intervals from the active list that have completed (returning their GrSurfaces
 *     to the free pool)
 *     allocates a new resource (preferably from the free pool) for the new interval
 *     adds the new interval to the active list (that is sorted by increasing end index)
 *
 * Note: the op indices (used in the usage intervals) come from the order of the ops in
 * their opsTasks after the opsTask DAG has been linearized.
 *
 *************************************************************************************************
 * How does instantiation failure handling work when explicitly allocating?
 *
 * In the gather usage intervals pass all the GrSurfaceProxies used in the flush should be
 * gathered (i.e., in GrOpsTask::gatherProxyIntervals).
 *
 * The allocator will churn through this list but could fail anywhere.
 *
 * Allocation failure handling occurs at two levels:
 *
 * 1) If the GrSurface backing an opsTask fails to allocate then the entire opsTask is dropped.
 *
 * 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped
 *    (via GrOpsTask::purgeOpsWithUninstantiatedProxies)
 *
 * The pass to determine which ops to drop is a bit laborious so we only check the opsTasks and
 * individual ops when something goes wrong in allocation (i.e., when the return code from
 * GrResourceAllocator::assign is bad)
 *
 * All together this means we should never attempt to draw an op which is missing some
 * required GrSurface.
 *
 * One wrinkle in this plan is that promise images are fulfilled during the gather interval pass.
 * If any of the promise images fail at this stage then the allocator is set into an error
 * state and all allocations are then scanned for failures during the main allocation pass.
 */
| 69 | class GrResourceAllocator { |
| 70 | public: |
Adlai Holler | b51dd57 | 2021-04-15 11:01:46 -0400 | [diff] [blame^] | 71 | GrResourceAllocator(GrDirectContext* dContext) |
Adlai Holler | ca1137b | 2021-04-08 11:39:55 -0400 | [diff] [blame] | 72 | : fDContext(dContext) {} |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 73 | |
Robert Phillips | 5b65a84 | 2017-11-13 15:48:12 -0500 | [diff] [blame] | 74 | ~GrResourceAllocator(); |
| 75 | |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 76 | unsigned int curOp() const { return fNumOps; } |
| 77 | void incOps() { fNumOps++; } |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 78 | |
Adlai Holler | 7f7a5df | 2021-02-09 17:41:10 +0000 | [diff] [blame] | 79 | /** Indicates whether a given call to addInterval represents an actual usage of the |
| 80 | * provided proxy. This is mainly here to accommodate deferred proxies attached to opsTasks. |
| 81 | * In that case we need to create an extra long interval for them (due to the upload) but |
| 82 | * don't want to count that usage/reference towards the proxy's recyclability. |
| 83 | */ |
| 84 | enum class ActualUse : bool { |
| 85 | kNo = false, |
| 86 | kYes = true |
| 87 | }; |
| 88 | |
Robert Phillips | eafd48a | 2017-11-16 07:52:08 -0500 | [diff] [blame] | 89 | // Add a usage interval from 'start' to 'end' inclusive. This is usually used for renderTargets. |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 90 | // If an existing interval already exists it will be expanded to include the new range. |
Adlai Holler | 7f7a5df | 2021-02-09 17:41:10 +0000 | [diff] [blame] | 91 | void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse |
Chris Dalton | 8816b93 | 2017-11-29 16:48:25 -0700 | [diff] [blame] | 92 | SkDEBUGCODE(, bool isDirectDstRead = false)); |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 93 | |
Adlai Holler | b51dd57 | 2021-04-15 11:01:46 -0400 | [diff] [blame^] | 94 | bool failedInstantiation() const { return fFailedInstantiation; } |
| 95 | |
Adlai Holler | cd2f96d | 2021-04-09 17:58:14 -0400 | [diff] [blame] | 96 | // Generate an internal plan for resource allocation. After this you can optionally call |
| 97 | // `makeBudgetHeadroom` to check whether that plan would go over our memory budget. |
Adlai Holler | ee2837b | 2021-04-09 16:52:48 -0400 | [diff] [blame] | 98 | // Fully-lazy proxies are also instantiated at this point so that their size can |
| 99 | // be known accurately. Returns false if any lazy proxy failed to instantiate, true otherwise. |
| 100 | bool planAssignment(); |
| 101 | |
Adlai Holler | cd2f96d | 2021-04-09 17:58:14 -0400 | [diff] [blame] | 102 | // Figure out how much VRAM headroom this plan requires. If there's enough purgeable resources, |
| 103 | // purge them and return true. Otherwise return false. |
| 104 | bool makeBudgetHeadroom(); |
| 105 | |
Adlai Holler | b51dd57 | 2021-04-15 11:01:46 -0400 | [diff] [blame^] | 106 | // Clear all internal state in preparation for a new set of intervals. |
| 107 | void reset(); |
| 108 | |
Adlai Holler | ee2837b | 2021-04-09 16:52:48 -0400 | [diff] [blame] | 109 | // Instantiate and assign resources to all proxies. |
Adlai Holler | 19fd514 | 2021-03-08 10:19:30 -0700 | [diff] [blame] | 110 | bool assign(); |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 111 | |
Robert Phillips | 715d08c | 2018-07-18 13:56:48 -0400 | [diff] [blame] | 112 | #if GR_ALLOCATION_SPEW |
| 113 | void dumpIntervals(); |
| 114 | #endif |
| 115 | |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 116 | private: |
| 117 | class Interval; |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 118 | class Register; |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 119 | |
| 120 | // Remove dead intervals from the active list |
| 121 | void expire(unsigned int curIndex); |
| 122 | |
| 123 | // These two methods wrap the interactions with the free pool |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 124 | void recycleRegister(Register* r); |
Adlai Holler | 7df8d22 | 2021-03-19 12:27:49 -0400 | [diff] [blame] | 125 | Register* findOrCreateRegisterFor(GrSurfaceProxy* proxy); |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 126 | |
Robert Phillips | 57aa367 | 2017-07-21 11:38:13 -0400 | [diff] [blame] | 127 | struct FreePoolTraits { |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 128 | static const GrScratchKey& GetKey(const Register& r) { |
| 129 | return r.scratchKey(); |
Robert Phillips | 57aa367 | 2017-07-21 11:38:13 -0400 | [diff] [blame] | 130 | } |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 131 | |
Robert Phillips | 57aa367 | 2017-07-21 11:38:13 -0400 | [diff] [blame] | 132 | static uint32_t Hash(const GrScratchKey& key) { return key.hash(); } |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 133 | static void OnFree(Register* r) { } |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 134 | }; |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 135 | typedef SkTMultiMap<Register, GrScratchKey, FreePoolTraits> FreePoolMultiMap; |
Robert Phillips | 57aa367 | 2017-07-21 11:38:13 -0400 | [diff] [blame] | 136 | |
Adlai Holler | b51dd57 | 2021-04-15 11:01:46 -0400 | [diff] [blame^] | 137 | typedef SkTHashMap<uint32_t, Interval*, GrCheapHash> IntvlHash; |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 138 | |
Adlai Holler | 7df8d22 | 2021-03-19 12:27:49 -0400 | [diff] [blame] | 139 | struct UniqueKeyHash { |
| 140 | uint32_t operator()(const GrUniqueKey& key) const { return key.hash(); } |
| 141 | }; |
| 142 | typedef SkTHashMap<GrUniqueKey, Register*, UniqueKeyHash> UniqueKeyRegisterHash; |
| 143 | |
| 144 | // Each proxy – with some exceptions – is assigned a register. After all assignments are made, |
| 145 | // another pass is performed to instantiate and assign actual surfaces to the proxies. Right |
| 146 | // now these are performed in one call, but in the future they will be separable and the user |
| 147 | // will be able to query re: memory cost before committing to surface creation. |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 148 | class Register { |
| 149 | public: |
Adlai Holler | 7df8d22 | 2021-03-19 12:27:49 -0400 | [diff] [blame] | 150 | // It's OK to pass an invalid scratch key iff the proxy has a unique key. |
Adlai Holler | ee2837b | 2021-04-09 16:52:48 -0400 | [diff] [blame] | 151 | Register(GrSurfaceProxy* originatingProxy, GrScratchKey, GrResourceProvider*); |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 152 | |
Adlai Holler | 7df8d22 | 2021-03-19 12:27:49 -0400 | [diff] [blame] | 153 | const GrScratchKey& scratchKey() const { return fScratchKey; } |
| 154 | const GrUniqueKey& uniqueKey() const { return fOriginatingProxy->getUniqueKey(); } |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 155 | |
Adlai Holler | cd2f96d | 2021-04-09 17:58:14 -0400 | [diff] [blame] | 156 | bool accountedForInBudget() const { return fAccountedForInBudget; } |
| 157 | void setAccountedForInBudget() { fAccountedForInBudget = true; } |
| 158 | |
Adlai Holler | ee2837b | 2021-04-09 16:52:48 -0400 | [diff] [blame] | 159 | GrSurface* existingSurface() const { return fExistingSurface.get(); } |
| 160 | |
Adlai Holler | 7df8d22 | 2021-03-19 12:27:49 -0400 | [diff] [blame] | 161 | // Can this register be used by other proxies after this one? |
| 162 | bool isRecyclable(const GrCaps&, GrSurfaceProxy* proxy, int knownUseCount) const; |
| 163 | |
| 164 | // Resolve the register allocation to an actual GrSurface. 'fOriginatingProxy' |
| 165 | // is used to cache the allocation when a given register is used by multiple |
| 166 | // proxies. |
| 167 | bool instantiateSurface(GrSurfaceProxy*, GrResourceProvider*); |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 168 | |
| 169 | SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; }) |
| 170 | |
| 171 | private: |
Adlai Holler | 7df8d22 | 2021-03-19 12:27:49 -0400 | [diff] [blame] | 172 | GrSurfaceProxy* fOriginatingProxy; |
| 173 | GrScratchKey fScratchKey; // free pool wants a reference to this. |
Adlai Holler | ee2837b | 2021-04-09 16:52:48 -0400 | [diff] [blame] | 174 | sk_sp<GrSurface> fExistingSurface; // queried from resource cache. may be null. |
Adlai Holler | cd2f96d | 2021-04-09 17:58:14 -0400 | [diff] [blame] | 175 | bool fAccountedForInBudget = false; |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 176 | |
| 177 | #ifdef SK_DEBUG |
| 178 | uint32_t fUniqueID; |
| 179 | |
| 180 | static uint32_t CreateUniqueID(); |
| 181 | #endif |
| 182 | }; |
| 183 | |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 184 | class Interval { |
| 185 | public: |
| 186 | Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end) |
Adlai Holler | 7df8d22 | 2021-03-19 12:27:49 -0400 | [diff] [blame] | 187 | : fProxy(proxy) |
| 188 | , fStart(start) |
| 189 | , fEnd(end) { |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 190 | SkASSERT(proxy); |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 191 | SkDEBUGCODE(fUniqueID = CreateUniqueID()); |
Robert Phillips | da1be46 | 2018-07-27 07:18:06 -0400 | [diff] [blame] | 192 | #if GR_TRACK_INTERVAL_CREATION |
Robert Phillips | 047d5bb | 2021-01-08 13:39:19 -0500 | [diff] [blame] | 193 | SkString proxyStr = proxy->dump(); |
| 194 | SkDebugf("New intvl %d: %s [%d, %d]\n", fUniqueID, proxyStr.c_str(), start, end); |
Robert Phillips | da1be46 | 2018-07-27 07:18:06 -0400 | [diff] [blame] | 195 | #endif |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 196 | } |
| 197 | |
Robert Phillips | f8e2502 | 2017-11-08 15:24:31 -0500 | [diff] [blame] | 198 | const GrSurfaceProxy* proxy() const { return fProxy; } |
| 199 | GrSurfaceProxy* proxy() { return fProxy; } |
Robert Phillips | c73666f | 2019-04-24 08:49:48 -0400 | [diff] [blame] | 200 | |
Robert Phillips | f8e2502 | 2017-11-08 15:24:31 -0500 | [diff] [blame] | 201 | unsigned int start() const { return fStart; } |
| 202 | unsigned int end() const { return fEnd; } |
Robert Phillips | c73666f | 2019-04-24 08:49:48 -0400 | [diff] [blame] | 203 | |
| 204 | void setNext(Interval* next) { fNext = next; } |
Robert Phillips | f8e2502 | 2017-11-08 15:24:31 -0500 | [diff] [blame] | 205 | const Interval* next() const { return fNext; } |
| 206 | Interval* next() { return fNext; } |
| 207 | |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 208 | Register* getRegister() const { return fRegister; } |
| 209 | void setRegister(Register* r) { fRegister = r; } |
| 210 | |
Robert Phillips | c73666f | 2019-04-24 08:49:48 -0400 | [diff] [blame] | 211 | void addUse() { fUses++; } |
Adlai Holler | 1143b1b | 2021-03-16 13:07:40 -0400 | [diff] [blame] | 212 | int uses() const { return fUses; } |
Robert Phillips | f8e2502 | 2017-11-08 15:24:31 -0500 | [diff] [blame] | 213 | |
| 214 | void extendEnd(unsigned int newEnd) { |
Chris Dalton | 8816b93 | 2017-11-29 16:48:25 -0700 | [diff] [blame] | 215 | if (newEnd > fEnd) { |
| 216 | fEnd = newEnd; |
Robert Phillips | da1be46 | 2018-07-27 07:18:06 -0400 | [diff] [blame] | 217 | #if GR_TRACK_INTERVAL_CREATION |
| 218 | SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd); |
| 219 | #endif |
Chris Dalton | 8816b93 | 2017-11-29 16:48:25 -0700 | [diff] [blame] | 220 | } |
Robert Phillips | f8e2502 | 2017-11-08 15:24:31 -0500 | [diff] [blame] | 221 | } |
| 222 | |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 223 | SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; }) |
| 224 | |
Robert Phillips | f8e2502 | 2017-11-08 15:24:31 -0500 | [diff] [blame] | 225 | private: |
Robert Phillips | 5b65a84 | 2017-11-13 15:48:12 -0500 | [diff] [blame] | 226 | GrSurfaceProxy* fProxy; |
Robert Phillips | 5b65a84 | 2017-11-13 15:48:12 -0500 | [diff] [blame] | 227 | unsigned int fStart; |
| 228 | unsigned int fEnd; |
Adlai Holler | 1143b1b | 2021-03-16 13:07:40 -0400 | [diff] [blame] | 229 | Interval* fNext = nullptr; |
Robert Phillips | c73666f | 2019-04-24 08:49:48 -0400 | [diff] [blame] | 230 | unsigned int fUses = 0; |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 231 | Register* fRegister = nullptr; |
Robert Phillips | da1be46 | 2018-07-27 07:18:06 -0400 | [diff] [blame] | 232 | |
Adlai Holler | 4cfbe53 | 2021-03-17 10:36:39 -0400 | [diff] [blame] | 233 | #ifdef SK_DEBUG |
Robert Phillips | da1be46 | 2018-07-27 07:18:06 -0400 | [diff] [blame] | 234 | uint32_t fUniqueID; |
| 235 | |
Adlai Holler | da16367 | 2021-03-15 11:03:37 -0400 | [diff] [blame] | 236 | static uint32_t CreateUniqueID(); |
Robert Phillips | da1be46 | 2018-07-27 07:18:06 -0400 | [diff] [blame] | 237 | #endif |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 238 | }; |
| 239 | |
| 240 | class IntervalList { |
| 241 | public: |
| 242 | IntervalList() = default; |
Adlai Holler | b51dd57 | 2021-04-15 11:01:46 -0400 | [diff] [blame^] | 243 | // N.B. No need for a destructor – the arena allocator will clean up for us. |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 244 | |
Robert Phillips | df25e3a | 2018-08-08 12:48:40 -0400 | [diff] [blame] | 245 | bool empty() const { |
| 246 | SkASSERT(SkToBool(fHead) == SkToBool(fTail)); |
| 247 | return !SkToBool(fHead); |
| 248 | } |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 249 | const Interval* peekHead() const { return fHead; } |
Robert Phillips | c73666f | 2019-04-24 08:49:48 -0400 | [diff] [blame] | 250 | Interval* peekHead() { return fHead; } |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 251 | Interval* popHead(); |
| 252 | void insertByIncreasingStart(Interval*); |
| 253 | void insertByIncreasingEnd(Interval*); |
| 254 | |
| 255 | private: |
Robert Phillips | df25e3a | 2018-08-08 12:48:40 -0400 | [diff] [blame] | 256 | SkDEBUGCODE(void validate() const;) |
| 257 | |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 258 | Interval* fHead = nullptr; |
Robert Phillips | df25e3a | 2018-08-08 12:48:40 -0400 | [diff] [blame] | 259 | Interval* fTail = nullptr; |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 260 | }; |
| 261 | |
Brian Salomon | 309b053 | 2018-10-10 17:22:00 -0400 | [diff] [blame] | 262 | // Compositing use cases can create > 80 intervals. |
| 263 | static const int kInitialArenaSize = 128 * sizeof(Interval); |
Robert Phillips | 8186cbe | 2017-11-01 17:32:39 -0400 | [diff] [blame] | 264 | |
Adlai Holler | ca1137b | 2021-04-08 11:39:55 -0400 | [diff] [blame] | 265 | GrDirectContext* fDContext; |
Brian Salomon | 577aa0f | 2018-11-30 13:32:23 -0500 | [diff] [blame] | 266 | FreePoolMultiMap fFreePool; // Recently created/used GrSurfaces |
| 267 | IntvlHash fIntvlHash; // All the intervals, hashed by proxyID |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 268 | |
Brian Salomon | 577aa0f | 2018-11-30 13:32:23 -0500 | [diff] [blame] | 269 | IntervalList fIntvlList; // All the intervals sorted by increasing start |
| 270 | IntervalList fActiveIntvls; // List of live intervals during assignment |
Robert Phillips | c476e5d | 2019-03-26 14:50:08 -0400 | [diff] [blame] | 271 | // (sorted by increasing end) |
Adlai Holler | 7df8d22 | 2021-03-19 12:27:49 -0400 | [diff] [blame] | 272 | IntervalList fFinishedIntvls; // All the completed intervals |
| 273 | // (sorted by increasing start) |
| 274 | UniqueKeyRegisterHash fUniqueKeyRegisters; |
Robert Phillips | c476e5d | 2019-03-26 14:50:08 -0400 | [diff] [blame] | 275 | unsigned int fNumOps = 0; |
Robert Phillips | 8186cbe | 2017-11-01 17:32:39 -0400 | [diff] [blame] | 276 | |
Adlai Holler | ee2837b | 2021-04-09 16:52:48 -0400 | [diff] [blame] | 277 | SkDEBUGCODE(bool fPlanned = false;) |
Brian Salomon | 577aa0f | 2018-11-30 13:32:23 -0500 | [diff] [blame] | 278 | SkDEBUGCODE(bool fAssigned = false;) |
Robert Phillips | eafd48a | 2017-11-16 07:52:08 -0500 | [diff] [blame] | 279 | |
Adlai Holler | b51dd57 | 2021-04-15 11:01:46 -0400 | [diff] [blame^] | 280 | SkSTArenaAllocWithReset<kInitialArenaSize> fInternalAllocator; // intervals & registers |
| 281 | bool fFailedInstantiation = false; |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 282 | }; |
| 283 | |
| 284 | #endif // GrResourceAllocator_DEFINED |