/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrResourceAllocator_DEFINED
#define GrResourceAllocator_DEFINED

#include "GrGpuResourcePriv.h"
#include "GrSurface.h"
#include "GrSurfaceProxy.h"

#include "SkArenaAlloc.h"
#include "SkTArray.h"
#include "SkTDynamicHash.h"
#include "SkTMultiMap.h"

class GrResourceProvider;
class GrUninstantiateProxyTracker;

// Print out explicit allocation information
#define GR_ALLOCATION_SPEW 0

// Print out information about interval creation
#define GR_TRACK_INTERVAL_CREATION 0

/*
 * The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
 * being given the usage intervals of the various proxies. It keeps these intervals in a singly
 * linked list sorted by increasing start index. (It also maintains a hash table from proxyID
 * to interval so the interval for a reused proxy can be found and extended.) When it comes
 * time to allocate the resources it traverses the sorted list and:
 *     removes intervals from the active list that have completed (returning their GrSurfaces
 *         to the free pool)
 *     allocates a new resource (preferably from the free pool) for the new interval
 *     adds the new interval to the active list (which is sorted by increasing end index)
 *
 * Note: the op indices (used in the usage intervals) come from the order of the ops in
 * their opLists after the opList DAG has been linearized.
 */
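/*
 * A minimal usage sketch (illustrative only, not code taken from Skia itself;
 * 'resourceProvider', 'atlasProxy', 'targetProxy' and the surrounding opList bookkeeping
 * are assumed to exist at the call site):
 *
 *     GrResourceAllocator alloc(resourceProvider);
 *
 *     // While recording: declare which ops use which proxies (op #0 is reserved for
 *     // uploads, so the first recorded op is op #1).
 *     alloc.addInterval(atlasProxy);                     // used only by the current op
 *     alloc.addInterval(targetProxy, 0, alloc.curOp());  // live from op #0 through the current op
 *     alloc.incOps();                                    // advance to the next op index
 *     alloc.markEndOfOpList(0);
 *
 *     // At flush time: assign GrSurfaces, possibly in several chunks to bound the peak
 *     // GPU-resource requirement.
 *     GrUninstantiateProxyTracker tracker;
 *     GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
 *     int startIndex, stopIndex;
 *     while (alloc.assign(&startIndex, &stopIndex, &tracker, &error)) {
 *         // execute the opLists in [startIndex, stopIndex)
 *     }
 */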
class GrResourceAllocator {
public:
    GrResourceAllocator(GrResourceProvider* resourceProvider)
            : fResourceProvider(resourceProvider) {
    }

    ~GrResourceAllocator();

    unsigned int curOp() const { return fNumOps; }
    void incOps() { fNumOps++; }
    unsigned int numOps() const { return fNumOps; }

    // Add a usage interval from 'start' to 'end' inclusive. This is usually used for
    // renderTargets. If an interval already exists for this proxy it will be expanded to
    // include the new range.
    void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end
                     SkDEBUGCODE(, bool isDirectDstRead = false));

    // Add an interval that spans just the current op. Usually this is for texture uses.
    // If an interval already exists for this proxy it will be expanded to include the new
    // operation.
    void addInterval(GrSurfaceProxy* proxy
                     SkDEBUGCODE(, bool isDirectDstRead = false)) {
        this->addInterval(proxy, fNumOps, fNumOps SkDEBUGCODE(, isDirectDstRead));
    }

    enum class AssignError {
        kNoError,
        kFailedProxyInstantiation
    };

    // Returns true when the opLists from 'startIndex' to 'stopIndex' should be executed;
    // false when nothing remains to be executed.
    // If any proxy fails to instantiate, the AssignError will be set to kFailedProxyInstantiation.
    // If this happens, the caller should remove all ops which reference an uninstantiated proxy.
    // This is used to execute a portion of the queued opLists in order to reduce the total
    // amount of GPU resources required.
    bool assign(int* startIndex, int* stopIndex, GrUninstantiateProxyTracker*,
                AssignError* outError);

    void markEndOfOpList(int opListIndex);

#if GR_ALLOCATION_SPEW
    void dumpIntervals();
#endif

private:
    class Interval;

    // Remove dead intervals from the active list
    void expire(unsigned int curIndex);

    // These two methods wrap the interactions with the free pool
    void recycleSurface(sk_sp<GrSurface> surface);
    sk_sp<GrSurface> findSurfaceFor(const GrSurfaceProxy* proxy, bool needsStencil);

    struct FreePoolTraits {
        static const GrScratchKey& GetKey(const GrSurface& s) {
            return s.resourcePriv().getScratchKey();
        }

        static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
        static void OnFree(GrSurface* s) { s->unref(); }
    };
    typedef SkTMultiMap<GrSurface, GrScratchKey, FreePoolTraits> FreePoolMultiMap;
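
    // Conceptually, findSurfaceFor() asks the free pool for any GrSurface whose scratch key
    // matches the proxy's requirements before falling back to fResourceProvider. A rough
    // sketch (illustrative only -- 'makeScratchKeyFor' is a hypothetical stand-in for the
    // real key computation, and the actual code in GrResourceAllocator.cpp also accounts
    // for stencil attachments):
    //
    //     GrScratchKey key;
    //     makeScratchKeyFor(proxy, &key);
    //     if (GrSurface* surface = fFreePool.find(key)) {
    //         fFreePool.remove(key, surface);    // the pool's ref is adopted by the caller
    //         return sk_sp<GrSurface>(surface);
    //     }
    //     return nullptr;                        // nothing suitable; create a new resource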

    typedef SkTDynamicHash<Interval, unsigned int> IntvlHash;

    class Interval {
    public:
        Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
                : fProxy(proxy)
                , fProxyID(proxy->uniqueID().asUInt())
                , fStart(start)
                , fEnd(end)
                , fNext(nullptr) {
            SkASSERT(proxy);
#if GR_TRACK_INTERVAL_CREATION
            fUniqueID = CreateUniqueID();
            SkDebugf("New intvl %d: proxyID: %d [ %d, %d ]\n",
                     fUniqueID, proxy->uniqueID().asUInt(), start, end);
#endif
        }

        void resetTo(GrSurfaceProxy* proxy, unsigned int start, unsigned int end) {
            SkASSERT(proxy);
            SkASSERT(!fNext);

            fProxy = proxy;
            fProxyID = proxy->uniqueID().asUInt();
            fStart = start;
            fEnd = end;
            fNext = nullptr;
#if GR_TRACK_INTERVAL_CREATION
            fUniqueID = CreateUniqueID();
            SkDebugf("New intvl %d: proxyID: %d [ %d, %d ]\n",
                     fUniqueID, proxy->uniqueID().asUInt(), start, end);
#endif
        }

        ~Interval() {
            SkASSERT(!fAssignedSurface);
        }

        const GrSurfaceProxy* proxy() const { return fProxy; }
        GrSurfaceProxy* proxy() { return fProxy; }
        unsigned int start() const { return fStart; }
        unsigned int end() const { return fEnd; }
        const Interval* next() const { return fNext; }
        Interval* next() { return fNext; }

        void setNext(Interval* next) { fNext = next; }

        void extendEnd(unsigned int newEnd) {
            if (newEnd > fEnd) {
#if GR_TRACK_INTERVAL_CREATION
                // Log the old end point before it is overwritten.
                SkDebugf("intvl %d: extending from %d to %d\n", fUniqueID, fEnd, newEnd);
#endif
                fEnd = newEnd;
            }
        }

        void assign(sk_sp<GrSurface>);
        bool wasAssignedSurface() const { return fAssignedSurface != nullptr; }
        sk_sp<GrSurface> detachSurface() { return std::move(fAssignedSurface); }

        // for SkTDynamicHash
        static const uint32_t& GetKey(const Interval& intvl) {
            return intvl.fProxyID;
        }
        static uint32_t Hash(const uint32_t& key) { return key; }

    private:
        sk_sp<GrSurface> fAssignedSurface;
        GrSurfaceProxy*  fProxy;
        uint32_t         fProxyID; // This is here because DynamicHash requires a ref to the key
        unsigned int     fStart;
        unsigned int     fEnd;
        Interval*        fNext;

#if GR_TRACK_INTERVAL_CREATION
        uint32_t fUniqueID;

        uint32_t CreateUniqueID();
#endif
    };

    class IntervalList {
    public:
        IntervalList() = default;
        ~IntervalList() {
            // The only time we delete an IntervalList is in the GrResourceAllocator dtor.
            // Since the arena allocator will clean up for us we don't bother here.
        }

        bool empty() const {
            SkASSERT(SkToBool(fHead) == SkToBool(fTail));
            return !SkToBool(fHead);
        }
        const Interval* peekHead() const { return fHead; }
        Interval* popHead();
        void insertByIncreasingStart(Interval*);
        void insertByIncreasingEnd(Interval*);
        Interval* detachAll();

    private:
        SkDEBUGCODE(void validate() const;)

        Interval* fHead = nullptr;
        Interval* fTail = nullptr;
    };

    // Gathered statistics indicate that 99% of flushes will be covered by <= 12 Intervals
    static const int kInitialArenaSize = 12 * sizeof(Interval);

    GrResourceProvider*    fResourceProvider;
    FreePoolMultiMap       fFreePool;          // Recently created/used GrSurfaces
    IntvlHash              fIntvlHash;         // All the intervals, hashed by proxyID

    IntervalList           fIntvlList;         // All the intervals sorted by increasing start
    IntervalList           fActiveIntvls;      // List of live intervals during assignment
                                               // (sorted by increasing end)
    unsigned int           fNumOps = 1;        // op #0 is reserved for uploads at the start
                                               // of a flush
    SkTArray<unsigned int> fEndOfOpListOpIndices;
    int                    fCurOpListIndex = 0;

    SkDEBUGCODE(bool fAssigned = false;)

    char                   fStorage[kInitialArenaSize];
    SkArenaAlloc           fIntervalAllocator { fStorage, kInitialArenaSize, 0 };
    Interval*              fFreeIntervalList = nullptr;
};

#endif // GrResourceAllocator_DEFINED