/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrResourceAllocator_DEFINED
#define GrResourceAllocator_DEFINED

#include "include/private/SkTHash.h"

#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrHashMapWithCache.h"
#include "src/gpu/GrSurface.h"
#include "src/gpu/GrSurfaceProxy.h"

#include "src/core/SkArenaAlloc.h"
#include "src/core/SkTMultiMap.h"

class GrResourceProvider;

// Print out explicit allocation information
#define GR_ALLOCATION_SPEW 0

// Print out information about interval creation
#define GR_TRACK_INTERVAL_CREATION 0

/*
 * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
 * being given the usage intervals of the various proxies. It keeps these intervals in a singly
 * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
 * to interval in order to detect proxy reuse.) When it comes time to allocate the resources it
 * traverses the sorted list and:
 *     removes intervals from the active list that have completed (returning their GrSurfaces
 *     to the free pool)
 *
 *     allocates a new resource (preferably from the free pool) for the new interval
 *     adds the new interval to the active list (which is sorted by increasing end index)
 *
 * Note: the op indices (used in the usage intervals) come from the order of the ops in
 * their opsTasks after the opsTask DAG has been linearized.
 *
 *************************************************************************************************
 * How does instantiation failure handling work when explicitly allocating?
 *
 * In the gather-usage-intervals pass all the GrSurfaceProxies used in the flush should be
 * gathered (i.e., in GrOpsTask::gatherProxyIntervals).
 *
 * The allocator will churn through this list but could fail anywhere.
 *
 * Allocation failure handling occurs at two levels:
 *
 * 1) If the GrSurface backing an opsTask fails to allocate then the entire opsTask is dropped.
 *
 * 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped
 *    (via GrOpsTask::purgeOpsWithUninstantiatedProxies).
 *
 * The pass to determine which ops to drop is a bit laborious, so we only check the opsTasks and
 * individual ops when something goes wrong in allocation (i.e., when the return code from
 * GrResourceAllocator::assign is bad).
 *
 * Altogether this means we should never attempt to draw an op which is missing some
 * required GrSurface.
 *
 * One wrinkle in this plan is that promise images are fulfilled during the gather-intervals
 * pass. If any of the promise images fail at this stage then the allocator is set into an error
 * state, and all allocations are then scanned for failures during the main allocation pass.
 */
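//
// A rough sketch of the assignment pass described above (illustrative pseudocode only, not
// the actual implementation; the exact calls inside the loop are assumptions):
//
//     bool GrResourceAllocator::assign() {
//         while (Interval* cur = fIntvlList.popHead()) {
//             this->expire(cur->start());        // retire intervals that have completed,
//                                                // returning their surfaces to the free pool
//             Register* r = this->findRegisterFor(cur->proxy());  // prefer the free pool
//             if (fFailedInstantiation) {
//                 break;                         // see the failure-handling notes above
//             }
//             cur->setRegister(r);
//             fActiveIntvls.insertByIncreasingEnd(cur);
//         }
//         return !fFailedInstantiation;
//     }
//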
class GrResourceAllocator {
public:
    GrResourceAllocator(GrResourceProvider* resourceProvider SkDEBUGCODE(, int numOpsTasks))
            : fResourceProvider(resourceProvider) {}

    ~GrResourceAllocator();

    unsigned int curOp() const { return fNumOps; }
    void incOps() { fNumOps++; }

    /** Indicates whether a given call to addInterval represents an actual usage of the
     *  provided proxy. This is mainly here to accommodate deferred proxies attached to
     *  opsTasks. In that case we need to create an extra-long interval for them (due to
     *  the upload) but don't want to count that usage/reference towards the proxy's
     *  recyclability.
     */
    enum class ActualUse : bool {
        kNo  = false,
        kYes = true
    };

    // Add a usage interval from 'start' to 'end' inclusive. This is usually used for
    // renderTargets. If an interval already exists for the proxy it will be expanded to
    // include the new range.
    void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse
                     SkDEBUGCODE(, bool isDirectDstRead = false));
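
    // An illustrative example of the two ActualUse modes (hypothetical caller code;
    // GrOpsTask::gatherProxyIntervals does something along these lines):
    //
    //     // A target used by ops 2 through 5 counts as a real use:
    //     alloc->addInterval(targetProxy, 2, 5, GrResourceAllocator::ActualUse::kYes);
    //
    //     // A deferred proxy attached to an opsTask gets an extra-long interval for its
    //     // upload, but that reference must not count towards recyclability:
    //     alloc->addInterval(deferredProxy, 0, alloc->curOp(),
    //                        GrResourceAllocator::ActualUse::kNo);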

    // Assign resources to all proxies. Returns whether the assignment was successful.
    bool assign();

#if GR_ALLOCATION_SPEW
    void dumpIntervals();
#endif

private:
    class Interval;
    class Register;

    // Remove dead intervals from the active list
    void expire(unsigned int curIndex);
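    //
    // Roughly (an illustrative sketch, not the actual implementation): expire pops every
    // active interval whose end precedes 'curIndex' and, when the interval's surface is
    // recyclable, returns its register (and thus its GrSurface) to the free pool:
    //
    //     while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
    //         Interval* dead = fActiveIntvls.popHead();
    //         if (dead->isSurfaceRecyclable()) {
    //             this->recycleRegister(dead->getRegister());
    //         }
    //     }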

    // These two methods wrap the interactions with the free pool
    void recycleRegister(Register* r);
    Register* findRegisterFor(const GrSurfaceProxy* proxy);

    struct FreePoolTraits {
        static const GrScratchKey& GetKey(const Register& r) {
            return r.scratchKey();
        }

        static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
        static void OnFree(Register* r) { }
    };
    typedef SkTMultiMap<Register, GrScratchKey, FreePoolTraits> FreePoolMultiMap;
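
    // The free pool is keyed by GrScratchKey, so any two GrSurfaces with compatible scratch
    // keys are interchangeable. A minimal sketch of how findRegisterFor might consult the
    // pool (illustrative only; the 'key' computation and the instantiation fallback are
    // assumptions, not the actual implementation):
    //
    //     if (Register* r = fFreePool.find(key)) {   // any register with a matching key
    //         fFreePool.remove(key, r);              // it is no longer free
    //         return r;
    //     }
    //     // ... otherwise create a new GrSurface and wrap it in a new Register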

    typedef SkTHashMap<uint32_t, Interval*, GrCheapHash> IntvlHash;

    // Right now this is just a wrapper around an actual GrSurface.
    // In the future this will be a placeholder for a GrSurface that will be
    // created later, after the user of the resource allocator commits to
    // a specific series of intervals.
    class Register {
    public:
        Register(sk_sp<GrSurface> s) : fSurface(std::move(s)) {
            SkASSERT(fSurface);
            SkDEBUGCODE(fUniqueID = CreateUniqueID();)
        }
        ~Register() = default;

        const GrScratchKey& scratchKey() const {
            return fSurface->resourcePriv().getScratchKey();
        }

        GrSurface* surface() const { return fSurface.get(); }
        sk_sp<GrSurface> refSurface() const { return fSurface; }

        SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; })

    private:
        sk_sp<GrSurface> fSurface;

#ifdef SK_DEBUG
        uint32_t fUniqueID;

        static uint32_t CreateUniqueID();
#endif
    };

    class Interval {
    public:
        Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
                : fProxy(proxy)
                , fStart(start)
                , fEnd(end) {
            SkASSERT(proxy);
            SkDEBUGCODE(fUniqueID = CreateUniqueID());
#if GR_TRACK_INTERVAL_CREATION
            SkString proxyStr = proxy->dump();
            SkDebugf("New intvl %d: %s [%d, %d]\n", fUniqueID, proxyStr.c_str(), start, end);
#endif
        }

        const GrSurfaceProxy* proxy() const { return fProxy; }
        GrSurfaceProxy* proxy() { return fProxy; }

        unsigned int start() const { return fStart; }
        unsigned int end() const { return fEnd; }

        void setNext(Interval* next) { fNext = next; }
        const Interval* next() const { return fNext; }
        Interval* next() { return fNext; }

        Register* getRegister() const { return fRegister; }
        void setRegister(Register* r) { fRegister = r; }

        bool isSurfaceRecyclable() const;

        void addUse() { fUses++; }
        int uses() const { return fUses; }

        void extendEnd(unsigned int newEnd) {
            if (newEnd > fEnd) {
#if GR_TRACK_INTERVAL_CREATION
                // Print the old end before overwriting it so "from" and "to" differ.
                SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd);
#endif
                fEnd = newEnd;
            }
        }

        SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; })

    private:
        GrSurfaceProxy* fProxy;
        unsigned int    fStart;
        unsigned int    fEnd;
        Interval*       fNext = nullptr;
        unsigned int    fUses = 0;
        Register*       fRegister = nullptr;

#ifdef SK_DEBUG
        uint32_t fUniqueID;

        static uint32_t CreateUniqueID();
#endif
    };

    class IntervalList {
    public:
        IntervalList() = default;
        ~IntervalList() {
            // The only time we delete an IntervalList is in the GrResourceAllocator dtor.
            // Since the arena allocator will clean up for us we don't bother here.
        }

        bool empty() const {
            SkASSERT(SkToBool(fHead) == SkToBool(fTail));
            return !SkToBool(fHead);
        }
        const Interval* peekHead() const { return fHead; }
        Interval* peekHead() { return fHead; }
        Interval* popHead();
        void insertByIncreasingStart(Interval*);
        void insertByIncreasingEnd(Interval*);

    private:
        SkDEBUGCODE(void validate() const;)

        Interval* fHead = nullptr;
        Interval* fTail = nullptr;
    };
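
    // One way the sorted insertions might look (an illustrative sketch, not the actual
    // implementation; insertByIncreasingEnd is analogous but compares end()):
    //
    //     void IntervalList::insertByIncreasingStart(Interval* intvl) {
    //         if (!fHead || intvl->start() <= fHead->start()) {
    //             intvl->setNext(fHead);          // new head of the list
    //             fHead = intvl;
    //         } else {
    //             Interval* prev = fHead;
    //             while (prev->next() && prev->next()->start() < intvl->start()) {
    //                 prev = prev->next();        // walk to the insertion point
    //             }
    //             intvl->setNext(prev->next());
    //             prev->setNext(intvl);
    //         }
    //         if (!intvl->next()) {
    //             fTail = intvl;                  // keep the tail pointer in sync
    //         }
    //     }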

    // Compositing use cases can create > 80 intervals.
    static const int kInitialArenaSize = 128 * sizeof(Interval);

    GrResourceProvider* fResourceProvider;
    FreePoolMultiMap    fFreePool;      // Recently created/used GrSurfaces
    IntvlHash           fIntvlHash;     // All the intervals, hashed by proxyID

    IntervalList        fIntvlList;     // All the intervals sorted by increasing start
    IntervalList        fActiveIntvls;  // List of live intervals during assignment
                                        // (sorted by increasing end)
    unsigned int        fNumOps = 0;

    SkDEBUGCODE(bool fAssigned = false;)

    SkSTArenaAlloc<kInitialArenaSize> fInternalAllocator;  // intervals & registers live here
    bool fFailedInstantiation = false;
};

#endif // GrResourceAllocator_DEFINED