/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

8#ifndef GrResourceAllocator_DEFINED
9#define GrResourceAllocator_DEFINED
10
Mike Kleinc0bd9f92019-04-23 12:05:21 -050011#include "src/gpu/GrGpuResourcePriv.h"
Greg Daniel456f9b52020-03-05 19:14:18 +000012#include "src/gpu/GrSurface.h"
Greg Danielf91aeb22019-06-18 09:58:02 -040013#include "src/gpu/GrSurfaceProxy.h"
Robert Phillips8186cbe2017-11-01 17:32:39 -040014
Ben Wagner729a23f2019-05-17 16:29:34 -040015#include "src/core/SkArenaAlloc.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050016#include "src/core/SkTDynamicHash.h"
17#include "src/core/SkTMultiMap.h"
Robert Phillips5af44de2017-07-18 14:49:38 -040018
19class GrResourceProvider;
20
Robert Phillips715d08c2018-07-18 13:56:48 -040021// Print out explicit allocation information
22#define GR_ALLOCATION_SPEW 0
23
Robert Phillipsda1be462018-07-27 07:18:06 -040024// Print out information about interval creation
25#define GR_TRACK_INTERVAL_CREATION 0
26
/*
 * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
 * being given the usage intervals of the various proxies. It keeps these intervals in a singly
 * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
 * to interval to find proxy reuse). When it comes time to allocate the resources it
 * traverses the sorted list and:
 *     removes intervals from the active list that have completed (returning their GrSurfaces
 *     to the free pool)
 *
 *     allocates a new resource (preferably from the free pool) for the new interval
 *     adds the new interval to the active list (that is sorted by increasing end index)
 *
 * Note: the op indices (used in the usage intervals) come from the order of the ops in
 * their opsTasks after the opsTask DAG has been linearized.
 *
 *************************************************************************************************
 * How does instantiation failure handling work when explicitly allocating?
 *
 * In the gather usage intervals pass all the GrSurfaceProxies used in the flush should be
 * gathered (i.e., in GrOpsTask::gatherProxyIntervals).
 *
 * The allocator will churn through this list but could fail anywhere.
 *
 * Allocation failure handling occurs at two levels:
 *
 * 1) If the GrSurface backing an opsTask fails to allocate then the entire opsTask is dropped.
 *
 * 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped
 *    (via GrOpsTask::purgeOpsWithUninstantiatedProxies)
 *
 * The pass to determine which ops to drop is a bit laborious so we only check the opsTasks and
 * individual ops when something goes wrong in allocation (i.e., when the return code from
 * GrResourceAllocator::assign is bad)
 *
 * All together this means we should never attempt to draw an op which is missing some
 * required GrSurface.
 *
 * One wrinkle in this plan is that promise images are fulfilled during the gather interval pass.
 * If any of the promise images fail at this stage then the allocator is set into an error
 * state and all allocations are then scanned for failures during the main allocation pass.
 */
68class GrResourceAllocator {
69public:
Brian Salomonbeb7f522019-08-30 16:19:42 -040070 GrResourceAllocator(GrResourceProvider* resourceProvider SkDEBUGCODE(, int numOpsTasks))
71 : fResourceProvider(resourceProvider) SkDEBUGCODE(, fNumOpsTasks(numOpsTasks)) {}
Robert Phillips5af44de2017-07-18 14:49:38 -040072
Robert Phillips5b65a842017-11-13 15:48:12 -050073 ~GrResourceAllocator();
74
Robert Phillips5af44de2017-07-18 14:49:38 -040075 unsigned int curOp() const { return fNumOps; }
76 void incOps() { fNumOps++; }
Robert Phillips5af44de2017-07-18 14:49:38 -040077
Robert Phillipsc73666f2019-04-24 08:49:48 -040078 /** Indicates whether a given call to addInterval represents an actual usage of the
Ravi Mistry2c98edf2020-12-10 11:57:59 -050079 * provided proxy. This is mainly here to accommodate deferred proxies attached to opsTasks.
Robert Phillipsc73666f2019-04-24 08:49:48 -040080 * In that case we need to create an extra long interval for them (due to the upload) but
81 * don't want to count that usage/reference towards the proxy's recyclability.
82 */
83 enum class ActualUse : bool {
84 kNo = false,
85 kYes = true
86 };
87
Robert Phillipseafd48a2017-11-16 07:52:08 -050088 // Add a usage interval from 'start' to 'end' inclusive. This is usually used for renderTargets.
Robert Phillips5af44de2017-07-18 14:49:38 -040089 // If an existing interval already exists it will be expanded to include the new range.
Robert Phillipsc73666f2019-04-24 08:49:48 -040090 void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end, ActualUse actualUse
Chris Dalton8816b932017-11-29 16:48:25 -070091 SkDEBUGCODE(, bool isDirectDstRead = false));
Robert Phillips5af44de2017-07-18 14:49:38 -040092
Greg Danielaa3dfbe2018-01-29 10:34:25 -050093 enum class AssignError {
94 kNoError,
95 kFailedProxyInstantiation
96 };
97
Greg Danielf41b2bd2019-08-22 16:19:24 -040098 // Returns true when the opsTasks from 'startIndex' to 'stopIndex' should be executed;
Robert Phillipseafd48a2017-11-16 07:52:08 -050099 // false when nothing remains to be executed.
Greg Danielaa3dfbe2018-01-29 10:34:25 -0500100 // If any proxy fails to instantiate, the AssignError will be set to kFailedProxyInstantiation.
101 // If this happens, the caller should remove all ops which reference an uninstantiated proxy.
Greg Danielf41b2bd2019-08-22 16:19:24 -0400102 // This is used to execute a portion of the queued opsTasks in order to reduce the total
Robert Phillipseafd48a2017-11-16 07:52:08 -0500103 // amount of GPU resources required.
Brian Salomon577aa0f2018-11-30 13:32:23 -0500104 bool assign(int* startIndex, int* stopIndex, AssignError* outError);
Robert Phillipseafd48a2017-11-16 07:52:08 -0500105
Robert Phillipsc73666f2019-04-24 08:49:48 -0400106 void determineRecyclability();
Greg Danielf41b2bd2019-08-22 16:19:24 -0400107 void markEndOfOpsTask(int opsTaskIndex);
Robert Phillips5af44de2017-07-18 14:49:38 -0400108
Robert Phillips715d08c2018-07-18 13:56:48 -0400109#if GR_ALLOCATION_SPEW
110 void dumpIntervals();
111#endif
112
Robert Phillips5af44de2017-07-18 14:49:38 -0400113private:
114 class Interval;
115
116 // Remove dead intervals from the active list
117 void expire(unsigned int curIndex);
118
Greg Danielf41b2bd2019-08-22 16:19:24 -0400119 bool onOpsTaskBoundary() const;
Robert Phillipsc476e5d2019-03-26 14:50:08 -0400120 void forceIntermediateFlush(int* stopIndex);
121
Robert Phillips5af44de2017-07-18 14:49:38 -0400122 // These two methods wrap the interactions with the free pool
Robert Phillips715d08c2018-07-18 13:56:48 -0400123 void recycleSurface(sk_sp<GrSurface> surface);
Chris Dalton0b68dda2019-11-07 21:08:03 -0700124 sk_sp<GrSurface> findSurfaceFor(const GrSurfaceProxy* proxy);
Robert Phillips5af44de2017-07-18 14:49:38 -0400125
Robert Phillips57aa3672017-07-21 11:38:13 -0400126 struct FreePoolTraits {
127 static const GrScratchKey& GetKey(const GrSurface& s) {
128 return s.resourcePriv().getScratchKey();
129 }
Robert Phillips5af44de2017-07-18 14:49:38 -0400130
Robert Phillips57aa3672017-07-21 11:38:13 -0400131 static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
Robert Phillipsf8e25022017-11-08 15:24:31 -0500132 static void OnFree(GrSurface* s) { s->unref(); }
Robert Phillips5af44de2017-07-18 14:49:38 -0400133 };
Robert Phillips57aa3672017-07-21 11:38:13 -0400134 typedef SkTMultiMap<GrSurface, GrScratchKey, FreePoolTraits> FreePoolMultiMap;
135
Robert Phillips5af44de2017-07-18 14:49:38 -0400136 typedef SkTDynamicHash<Interval, unsigned int> IntvlHash;
137
138 class Interval {
139 public:
140 Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
141 : fProxy(proxy)
142 , fProxyID(proxy->uniqueID().asUInt())
143 , fStart(start)
144 , fEnd(end)
145 , fNext(nullptr) {
146 SkASSERT(proxy);
Robert Phillipsda1be462018-07-27 07:18:06 -0400147#if GR_TRACK_INTERVAL_CREATION
148 fUniqueID = CreateUniqueID();
149 SkDebugf("New intvl %d: proxyID: %d [ %d, %d ]\n",
150 fUniqueID, proxy->uniqueID().asUInt(), start, end);
151#endif
Robert Phillips5af44de2017-07-18 14:49:38 -0400152 }
153
Robert Phillips39667382019-04-17 16:03:30 -0400154 // Used when recycling an interval
Robert Phillips8186cbe2017-11-01 17:32:39 -0400155 void resetTo(GrSurfaceProxy* proxy, unsigned int start, unsigned int end) {
156 SkASSERT(proxy);
Robert Phillips39667382019-04-17 16:03:30 -0400157 SkASSERT(!fProxy && !fNext);
Robert Phillips8186cbe2017-11-01 17:32:39 -0400158
Robert Phillipsc73666f2019-04-24 08:49:48 -0400159 fUses = 0;
Robert Phillips8186cbe2017-11-01 17:32:39 -0400160 fProxy = proxy;
161 fProxyID = proxy->uniqueID().asUInt();
162 fStart = start;
163 fEnd = end;
164 fNext = nullptr;
Robert Phillipsda1be462018-07-27 07:18:06 -0400165#if GR_TRACK_INTERVAL_CREATION
166 fUniqueID = CreateUniqueID();
167 SkDebugf("New intvl %d: proxyID: %d [ %d, %d ]\n",
168 fUniqueID, proxy->uniqueID().asUInt(), start, end);
169#endif
Robert Phillips8186cbe2017-11-01 17:32:39 -0400170 }
171
Robert Phillips5b65a842017-11-13 15:48:12 -0500172 ~Interval() {
173 SkASSERT(!fAssignedSurface);
174 }
175
Robert Phillipsf8e25022017-11-08 15:24:31 -0500176 const GrSurfaceProxy* proxy() const { return fProxy; }
177 GrSurfaceProxy* proxy() { return fProxy; }
Robert Phillipsc73666f2019-04-24 08:49:48 -0400178
Robert Phillipsf8e25022017-11-08 15:24:31 -0500179 unsigned int start() const { return fStart; }
180 unsigned int end() const { return fEnd; }
Robert Phillipsc73666f2019-04-24 08:49:48 -0400181
182 void setNext(Interval* next) { fNext = next; }
Robert Phillipsf8e25022017-11-08 15:24:31 -0500183 const Interval* next() const { return fNext; }
184 Interval* next() { return fNext; }
185
Robert Phillipsc73666f2019-04-24 08:49:48 -0400186 void markAsRecyclable() { fIsRecyclable = true;}
187 bool isRecyclable() const { return fIsRecyclable; }
188
189 void addUse() { fUses++; }
190 int uses() { return fUses; }
Robert Phillipsf8e25022017-11-08 15:24:31 -0500191
192 void extendEnd(unsigned int newEnd) {
Chris Dalton8816b932017-11-29 16:48:25 -0700193 if (newEnd > fEnd) {
194 fEnd = newEnd;
Robert Phillipsda1be462018-07-27 07:18:06 -0400195#if GR_TRACK_INTERVAL_CREATION
196 SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd);
197#endif
Chris Dalton8816b932017-11-29 16:48:25 -0700198 }
Robert Phillipsf8e25022017-11-08 15:24:31 -0500199 }
200
Robert Phillips5b65a842017-11-13 15:48:12 -0500201 void assign(sk_sp<GrSurface>);
Ben Wagner5d1adbf2018-05-28 13:35:39 -0400202 bool wasAssignedSurface() const { return fAssignedSurface != nullptr; }
Robert Phillips5b65a842017-11-13 15:48:12 -0500203 sk_sp<GrSurface> detachSurface() { return std::move(fAssignedSurface); }
204
Robert Phillips5af44de2017-07-18 14:49:38 -0400205 // for SkTDynamicHash
206 static const uint32_t& GetKey(const Interval& intvl) {
207 return intvl.fProxyID;
208 }
209 static uint32_t Hash(const uint32_t& key) { return key; }
210
Robert Phillipsf8e25022017-11-08 15:24:31 -0500211 private:
Robert Phillips5b65a842017-11-13 15:48:12 -0500212 sk_sp<GrSurface> fAssignedSurface;
213 GrSurfaceProxy* fProxy;
214 uint32_t fProxyID; // This is here b.c. DynamicHash requires a ref to the key
215 unsigned int fStart;
216 unsigned int fEnd;
217 Interval* fNext;
Robert Phillipsc73666f2019-04-24 08:49:48 -0400218 unsigned int fUses = 0;
219 bool fIsRecyclable = false;
Robert Phillipsda1be462018-07-27 07:18:06 -0400220
221#if GR_TRACK_INTERVAL_CREATION
222 uint32_t fUniqueID;
223
224 uint32_t CreateUniqueID();
225#endif
Robert Phillips5af44de2017-07-18 14:49:38 -0400226 };
227
228 class IntervalList {
229 public:
230 IntervalList() = default;
231 ~IntervalList() {
Robert Phillips8186cbe2017-11-01 17:32:39 -0400232 // The only time we delete an IntervalList is in the GrResourceAllocator dtor.
233 // Since the arena allocator will clean up for us we don't bother here.
Robert Phillips5af44de2017-07-18 14:49:38 -0400234 }
235
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400236 bool empty() const {
237 SkASSERT(SkToBool(fHead) == SkToBool(fTail));
238 return !SkToBool(fHead);
239 }
Robert Phillips5af44de2017-07-18 14:49:38 -0400240 const Interval* peekHead() const { return fHead; }
Robert Phillipsc73666f2019-04-24 08:49:48 -0400241 Interval* peekHead() { return fHead; }
Robert Phillips5af44de2017-07-18 14:49:38 -0400242 Interval* popHead();
243 void insertByIncreasingStart(Interval*);
244 void insertByIncreasingEnd(Interval*);
Robert Phillips4150eea2018-02-07 17:08:21 -0500245 Interval* detachAll();
Robert Phillips5af44de2017-07-18 14:49:38 -0400246
247 private:
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400248 SkDEBUGCODE(void validate() const;)
249
Robert Phillips5af44de2017-07-18 14:49:38 -0400250 Interval* fHead = nullptr;
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400251 Interval* fTail = nullptr;
Robert Phillips5af44de2017-07-18 14:49:38 -0400252 };
253
Brian Salomon309b0532018-10-10 17:22:00 -0400254 // Compositing use cases can create > 80 intervals.
255 static const int kInitialArenaSize = 128 * sizeof(Interval);
Robert Phillips8186cbe2017-11-01 17:32:39 -0400256
Brian Salomon577aa0f2018-11-30 13:32:23 -0500257 GrResourceProvider* fResourceProvider;
Brian Salomon577aa0f2018-11-30 13:32:23 -0500258 FreePoolMultiMap fFreePool; // Recently created/used GrSurfaces
259 IntvlHash fIntvlHash; // All the intervals, hashed by proxyID
Robert Phillips5af44de2017-07-18 14:49:38 -0400260
Brian Salomon577aa0f2018-11-30 13:32:23 -0500261 IntervalList fIntvlList; // All the intervals sorted by increasing start
262 IntervalList fActiveIntvls; // List of live intervals during assignment
Robert Phillipsc476e5d2019-03-26 14:50:08 -0400263 // (sorted by increasing end)
264 unsigned int fNumOps = 0;
Greg Danielf41b2bd2019-08-22 16:19:24 -0400265 SkTArray<unsigned int> fEndOfOpsTaskOpIndices;
266 int fCurOpsTaskIndex = 0;
267 SkDEBUGCODE(const int fNumOpsTasks = -1;)
Robert Phillips8186cbe2017-11-01 17:32:39 -0400268
Brian Salomon577aa0f2018-11-30 13:32:23 -0500269 SkDEBUGCODE(bool fAssigned = false;)
Robert Phillipseafd48a2017-11-16 07:52:08 -0500270
Brian Salomon577aa0f2018-11-30 13:32:23 -0500271 char fStorage[kInitialArenaSize];
272 SkArenaAlloc fIntervalAllocator{fStorage, kInitialArenaSize, kInitialArenaSize};
273 Interval* fFreeIntervalList = nullptr;
Robert Phillips82774f82019-06-20 14:38:27 -0400274 bool fLazyInstantiationError = false;
Robert Phillips5af44de2017-07-18 14:49:38 -0400275};
276
277#endif // GrResourceAllocator_DEFINED