blob: d7e685156bcad0cadc89a4226b0789e33613f3d7 [file] [log] [blame]
Robert Phillips5af44de2017-07-18 14:49:38 -04001/*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "src/gpu/GrResourceAllocator.h"
Robert Phillips5af44de2017-07-18 14:49:38 -04009
Mike Kleinc0bd9f92019-04-23 12:05:21 -050010#include "src/gpu/GrGpuResourcePriv.h"
Greg Danielf41b2bd2019-08-22 16:19:24 -040011#include "src/gpu/GrOpsTask.h"
Greg Danielf91aeb22019-06-18 09:58:02 -040012#include "src/gpu/GrRenderTargetProxy.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050013#include "src/gpu/GrResourceProvider.h"
Greg Danielf91aeb22019-06-18 09:58:02 -040014#include "src/gpu/GrSurfaceProxy.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050015#include "src/gpu/GrSurfaceProxyPriv.h"
Greg Danielf91aeb22019-06-18 09:58:02 -040016#include "src/gpu/GrTextureProxy.h"
Robert Phillips5af44de2017-07-18 14:49:38 -040017
#if GR_TRACK_INTERVAL_CREATION
    #include <atomic>

    // Hands out a process-unique, non-zero ID for each Interval (debug tracking only).
    // Thread-safe: the counter is a static atomic. memory_order_relaxed suffices
    // because the IDs are only used for identification, not for synchronization.
    uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
        static std::atomic<uint32_t> nextID{1};
        uint32_t id;
        do {
            id = nextID.fetch_add(1, std::memory_order_relaxed);
        } while (id == SK_InvalidUniqueID);  // skip the reserved invalid ID on wrap-around
        return id;
    }
#endif
30
// Attach 's' to this interval: the interval keeps one ref on the surface and the
// interval's proxy is instantiated with the other.
void GrResourceAllocator::Interval::assign(sk_sp<GrSurface> s) {
    SkASSERT(!fAssignedSurface);  // an interval is only ever assigned one surface
    fAssignedSurface = s;                 // copy: interval holds its own ref
    fProxy->priv().assign(std::move(s));  // move the remaining ref into the proxy
}
36
Robert Phillipsc73666f2019-04-24 08:49:48 -040037void GrResourceAllocator::determineRecyclability() {
38 for (Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
39 if (cur->proxy()->canSkipResourceAllocator()) {
40 // These types of proxies can slip in here if they require a stencil buffer
41 continue;
42 }
43
Brian Salomon557e8122019-10-24 10:37:08 -040044 if (!cur->proxy()->refCntGreaterThan(cur->uses())) {
Robert Phillipsc73666f2019-04-24 08:49:48 -040045 // All the refs on the proxy are known to the resource allocator thus no one
46 // should be holding onto it outside of Ganesh.
Robert Phillipsc73666f2019-04-24 08:49:48 -040047 cur->markAsRecyclable();
48 }
49 }
50}
51
// Record the boundary of opsTask 'opsTaskIndex': the current op index becomes the
// first op index of the NEXT opsTask. Boundaries must be recorded in order, one
// per opsTask, before assignment begins.
void GrResourceAllocator::markEndOfOpsTask(int opsTaskIndex) {
    SkASSERT(!fAssigned); // We shouldn't be adding any opsTasks after (or during) assignment

    // Boundaries arrive strictly in order - this must be the next unmarked task.
    SkASSERT(fEndOfOpsTaskOpIndices.count() == opsTaskIndex);
    if (!fEndOfOpsTaskOpIndices.empty()) {
        // Boundary indices must be strictly increasing.
        SkASSERT(fEndOfOpsTaskOpIndices.back() < this->curOp());
    }

    // This is the first op index of the next opsTask
    fEndOfOpsTaskOpIndices.push_back(this->curOp());
    SkASSERT(fEndOfOpsTaskOpIndices.count() <= fNumOpsTasks);
}
64
// By destruction time every interval must have been consumed by assign()/expire()
// and the interval hash must have been reset - nothing may still be outstanding.
GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}
70
// Record that 'proxy' is used over the op range [start, end]. If the proxy already
// has an interval the existing one is extended; otherwise a new interval is created
// (recycling one from the free interval list when possible). 'actualUse' counts
// toward the proxy's use tally used later by determineRecyclability().
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned); // We shouldn't be adding any intervals after (or during) assignment

    // Proxies that manage their own backing store don't participate.
    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(fResourceProvider)) {
            // Note the failure; assign() reports it via AssignError.
            fLazyInstantiationError = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign(). So
            // must already be instantiated or it must be a lazy proxy that we instantiated above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
        // Revise the interval for an existing use
#ifdef SK_DEBUG
        if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            // Otherwise new uses must begin at or after the current interval's end.
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        intvl->extendEnd(end);
        return;
    }
    // No existing interval: reuse a node from the free list or allocate a new one.
    Interval* newIntvl;
    if (fFreeIntervalList) {
        newIntvl = fFreeIntervalList;
        fFreeIntervalList = newIntvl->next();
        newIntvl->setNext(nullptr);
        newIntvl->resetTo(proxy, start, end);
    } else {
        newIntvl = fIntervalAllocator.make<Interval>(proxy, start, end);
    }

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.add(newIntvl);
}
127
128GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400129 SkDEBUGCODE(this->validate());
130
Robert Phillips5af44de2017-07-18 14:49:38 -0400131 Interval* temp = fHead;
132 if (temp) {
Robert Phillipsf8e25022017-11-08 15:24:31 -0500133 fHead = temp->next();
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400134 if (!fHead) {
135 fTail = nullptr;
136 }
137 temp->setNext(nullptr);
Robert Phillips5af44de2017-07-18 14:49:38 -0400138 }
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400139
140 SkDEBUGCODE(this->validate());
Robert Phillips5af44de2017-07-18 14:49:38 -0400141 return temp;
142}
143
144// TODO: fuse this with insertByIncreasingEnd
145void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400146 SkDEBUGCODE(this->validate());
147 SkASSERT(!intvl->next());
148
Robert Phillips5af44de2017-07-18 14:49:38 -0400149 if (!fHead) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400150 // 14%
151 fHead = fTail = intvl;
Robert Phillipsf8e25022017-11-08 15:24:31 -0500152 } else if (intvl->start() <= fHead->start()) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400153 // 3%
Robert Phillipsf8e25022017-11-08 15:24:31 -0500154 intvl->setNext(fHead);
Robert Phillips5af44de2017-07-18 14:49:38 -0400155 fHead = intvl;
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400156 } else if (fTail->start() <= intvl->start()) {
157 // 83%
158 fTail->setNext(intvl);
159 fTail = intvl;
Robert Phillips5af44de2017-07-18 14:49:38 -0400160 } else {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400161 // almost never
Robert Phillips5af44de2017-07-18 14:49:38 -0400162 Interval* prev = fHead;
Robert Phillipsf8e25022017-11-08 15:24:31 -0500163 Interval* next = prev->next();
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400164 for (; intvl->start() > next->start(); prev = next, next = next->next()) {
Robert Phillips5af44de2017-07-18 14:49:38 -0400165 }
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400166
167 SkASSERT(next);
Robert Phillipsf8e25022017-11-08 15:24:31 -0500168 intvl->setNext(next);
169 prev->setNext(intvl);
Robert Phillips5af44de2017-07-18 14:49:38 -0400170 }
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400171
172 SkDEBUGCODE(this->validate());
Robert Phillips5af44de2017-07-18 14:49:38 -0400173}
174
175// TODO: fuse this with insertByIncreasingStart
176void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400177 SkDEBUGCODE(this->validate());
178 SkASSERT(!intvl->next());
179
Robert Phillips5af44de2017-07-18 14:49:38 -0400180 if (!fHead) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400181 // 14%
182 fHead = fTail = intvl;
Robert Phillipsf8e25022017-11-08 15:24:31 -0500183 } else if (intvl->end() <= fHead->end()) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400184 // 64%
Robert Phillipsf8e25022017-11-08 15:24:31 -0500185 intvl->setNext(fHead);
Robert Phillips5af44de2017-07-18 14:49:38 -0400186 fHead = intvl;
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400187 } else if (fTail->end() <= intvl->end()) {
188 // 3%
189 fTail->setNext(intvl);
190 fTail = intvl;
Robert Phillips5af44de2017-07-18 14:49:38 -0400191 } else {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400192 // 19% but 81% of those land right after the list's head
Robert Phillips5af44de2017-07-18 14:49:38 -0400193 Interval* prev = fHead;
Robert Phillipsf8e25022017-11-08 15:24:31 -0500194 Interval* next = prev->next();
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400195 for (; intvl->end() > next->end(); prev = next, next = next->next()) {
Robert Phillips5af44de2017-07-18 14:49:38 -0400196 }
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400197
198 SkASSERT(next);
Robert Phillipsf8e25022017-11-08 15:24:31 -0500199 intvl->setNext(next);
200 prev->setNext(intvl);
Robert Phillips5af44de2017-07-18 14:49:38 -0400201 }
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400202
203 SkDEBUGCODE(this->validate());
Robert Phillips5af44de2017-07-18 14:49:38 -0400204}
205
#ifdef SK_DEBUG
// Debug-only consistency check: head/tail are both null or both non-null, and
// fTail is reachable as the final node of the chain starting at fHead.
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    // Walk to the end; 'last' stays null for an empty list.
    Interval* last = nullptr;
    Interval* node = fHead;
    while (node) {
        last = node;
        node = node->next();
    }

    SkASSERT(fTail == last);
}
#endif
Robert Phillips4150eea2018-02-07 17:08:21 -0500217
218 GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::detachAll() {
219 Interval* tmp = fHead;
220 fHead = nullptr;
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400221 fTail = nullptr;
Robert Phillips4150eea2018-02-07 17:08:21 -0500222 return tmp;
223}
224
Robert Phillips5af44de2017-07-18 14:49:38 -0400225// 'surface' can be reused. Add it back to the free pool.
Robert Phillips715d08c2018-07-18 13:56:48 -0400226void GrResourceAllocator::recycleSurface(sk_sp<GrSurface> surface) {
Robert Phillips57aa3672017-07-21 11:38:13 -0400227 const GrScratchKey &key = surface->resourcePriv().getScratchKey();
228
229 if (!key.isValid()) {
230 return; // can't do it w/o a valid scratch key
231 }
232
Robert Phillipsf8e25022017-11-08 15:24:31 -0500233 if (surface->getUniqueKey().isValid()) {
234 // If the surface has a unique key we throw it back into the resource cache.
235 // If things get really tight 'findSurfaceFor' may pull it back out but there is
236 // no need to have it in tight rotation.
237 return;
238 }
239
Robert Phillips715d08c2018-07-18 13:56:48 -0400240#if GR_ALLOCATION_SPEW
241 SkDebugf("putting surface %d back into pool\n", surface->uniqueID().asUInt());
242#endif
Robert Phillips57aa3672017-07-21 11:38:13 -0400243 // TODO: fix this insertion so we get a more LRU-ish behavior
Robert Phillips5b65a842017-11-13 15:48:12 -0500244 fFreePool.insert(key, surface.release());
Robert Phillips5af44de2017-07-18 14:49:38 -0400245}
246
// First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
// If we can't find a useable one, create a new one.
sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(const GrSurfaceProxy* proxy) {
    if (proxy->asTextureProxy() && proxy->asTextureProxy()->getUniqueKey().isValid()) {
        // First try to reattach to a cached version if the proxy is uniquely keyed
        if (sk_sp<GrSurface> surface = fResourceProvider->findByUniqueKey<GrSurface>(
                proxy->asTextureProxy()->getUniqueKey())) {
            return surface;
        }
    }

    // First look in the free pool
    GrScratchKey key;

    proxy->priv().computeScratchKey(*fResourceProvider->caps(), &key);

    // The pool lookup accepts any surface matching the scratch key.
    auto filter = [] (const GrSurface* s) {
        return true;
    };
    sk_sp<GrSurface> surface(fFreePool.findAndRemove(key, filter));
    if (surface) {
        if (SkBudgeted::kYes == proxy->isBudgeted() &&
            GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
            // This gets the job done but isn't quite correct. It would be better to try to
            // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
            surface->resourcePriv().makeBudgeted();
        }
        // recycleSurface() never pools uniquely-keyed surfaces.
        SkASSERT(!surface->getUniqueKey().isValid());
        return surface;
    }

    // Failing that, try to grab a new one from the resource cache
    return proxy->priv().createSurface(fResourceProvider);
}
281
282// Remove any intervals that end before the current index. Return their GrSurfaces
Robert Phillips39667382019-04-17 16:03:30 -0400283// to the free pool if possible.
Robert Phillips5af44de2017-07-18 14:49:38 -0400284void GrResourceAllocator::expire(unsigned int curIndex) {
Robert Phillipsf8e25022017-11-08 15:24:31 -0500285 while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
Robert Phillips5af44de2017-07-18 14:49:38 -0400286 Interval* temp = fActiveIntvls.popHead();
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400287 SkASSERT(!temp->next());
Robert Phillips5b65a842017-11-13 15:48:12 -0500288
289 if (temp->wasAssignedSurface()) {
Robert Phillips715d08c2018-07-18 13:56:48 -0400290 sk_sp<GrSurface> surface = temp->detachSurface();
291
Robert Phillipsc73666f2019-04-24 08:49:48 -0400292 if (temp->isRecyclable()) {
Robert Phillips715d08c2018-07-18 13:56:48 -0400293 this->recycleSurface(std::move(surface));
294 }
Robert Phillips5b65a842017-11-13 15:48:12 -0500295 }
Robert Phillips8186cbe2017-11-01 17:32:39 -0400296
297 // Add temp to the free interval list so it can be reused
Robert Phillips715d08c2018-07-18 13:56:48 -0400298 SkASSERT(!temp->wasAssignedSurface()); // it had better not have a ref on a surface
Robert Phillipsf8e25022017-11-08 15:24:31 -0500299 temp->setNext(fFreeIntervalList);
Robert Phillips8186cbe2017-11-01 17:32:39 -0400300 fFreeIntervalList = temp;
Robert Phillips5af44de2017-07-18 14:49:38 -0400301 }
302}
303
Greg Danielf41b2bd2019-08-22 16:19:24 -0400304bool GrResourceAllocator::onOpsTaskBoundary() const {
Robert Phillipsc476e5d2019-03-26 14:50:08 -0400305 if (fIntvlList.empty()) {
Greg Danielf41b2bd2019-08-22 16:19:24 -0400306 SkASSERT(fCurOpsTaskIndex+1 <= fNumOpsTasks);
307 // Although technically on an opsTask boundary there is no need to force an
Robert Phillipsc476e5d2019-03-26 14:50:08 -0400308 // intermediate flush here
309 return false;
310 }
311
312 const Interval* tmp = fIntvlList.peekHead();
Greg Danielf41b2bd2019-08-22 16:19:24 -0400313 return fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start();
Robert Phillipsc476e5d2019-03-26 14:50:08 -0400314}
315
// Cut the current allocation pass short at the current opsTask boundary, writing
// the (exclusive) stop index for the caller and expiring intervals that won't
// survive the partial flush.
void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
    *stopIndex = fCurOpsTaskIndex+1;

    // This is interrupting the allocation of resources for this flush. We need to
    // proactively clear the active interval list of any intervals that aren't
    // guaranteed to survive the partial flush lest they become zombies (i.e.,
    // holding a deleted surface proxy).
    const Interval* tmp = fIntvlList.peekHead();
    SkASSERT(fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start());

    fCurOpsTaskIndex++;
    SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);

    // Expire using the head's start (peeked before the index bump above).
    this->expire(tmp->start());
}
331
// Assign a GrSurface to every pending interval's proxy, reusing pooled surfaces
// where possible. Writes the [startIndex, stopIndex) range of opsTasks whose
// resources are now assigned. Returns false when there is nothing to render.
// 'outError' reports any proxy-instantiation failure (including lazy-proxy
// failures recorded earlier in addInterval).
bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
    SkASSERT(outError);
    *outError = fLazyInstantiationError ? AssignError::kFailedProxyInstantiation
                                        : AssignError::kNoError;

    SkASSERT(fNumOpsTasks == fEndOfOpsTaskOpIndices.count());

    fIntvlHash.reset(); // we don't need the interval hash anymore

    if (fCurOpsTaskIndex >= fEndOfOpsTaskOpIndices.count()) {
        return false; // nothing to render
    }

    *startIndex = fCurOpsTaskIndex;
    *stopIndex = fEndOfOpsTaskOpIndices.count();

    if (fIntvlList.empty()) {
        fCurOpsTaskIndex = fEndOfOpsTaskOpIndices.count();
        return true; // no resources to assign
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning opsTasks %d through %d out of %d numOpsTasks\n",
             *startIndex, *stopIndex, fNumOpsTasks);
    SkDebugf("EndOfOpsTaskIndices: ");
    for (int i = 0; i < fEndOfOpsTaskOpIndices.count(); ++i) {
        SkDebugf("%d ", fEndOfOpsTaskOpIndices[i]);
    }
    SkDebugf("\n");
#endif

    SkDEBUGCODE(fAssigned = true;)

#if GR_ALLOCATION_SPEW
    this->dumpIntervals();
#endif
    // Intervals are consumed in increasing-start order; fActiveIntvls holds the
    // currently-live ones ordered by increasing end so expire() can pop them.
    while (Interval* cur = fIntvlList.popHead()) {
        // Advance the opsTask cursor past any tasks that end before this interval.
        while (fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= cur->start()) {
            fCurOpsTaskIndex++;
            SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
        }

        // Retire intervals that ended before this one begins (may recycle surfaces).
        this->expire(cur->start());

        if (cur->proxy()->isInstantiated()) {
            // Already has a surface - just track its lifetime.
            fActiveIntvls.insertByIncreasingEnd(cur);

            if (fResourceProvider->overBudget()) {
                // Only force intermediate draws on opsTask boundaries
                if (this->onOpsTaskBoundary()) {
                    this->forceIntermediateFlush(stopIndex);
                    return true;
                }
            }

            continue;
        }

        if (cur->proxy()->isLazy()) {
            if (!cur->proxy()->priv().doLazyInstantiation(fResourceProvider)) {
                *outError = AssignError::kFailedProxyInstantiation;
            }
        } else if (sk_sp<GrSurface> surface = this->findSurfaceFor(cur->proxy())) {
            // TODO: make getUniqueKey virtual on GrSurfaceProxy
            GrTextureProxy* texProxy = cur->proxy()->asTextureProxy();

            if (texProxy && texProxy->getUniqueKey().isValid()) {
                // Propagate the proxy's unique key to a freshly-created surface.
                if (!surface->getUniqueKey().isValid()) {
                    fResourceProvider->assignUniqueKeyToResource(texProxy->getUniqueKey(),
                                                                 surface.get());
                }
                SkASSERT(surface->getUniqueKey() == texProxy->getUniqueKey());
            }

#if GR_ALLOCATION_SPEW
            SkDebugf("Assigning %d to %d\n",
                 surface->uniqueID().asUInt(),
                 cur->proxy()->uniqueID().asUInt());
#endif

            cur->assign(std::move(surface));
        } else {
            // No surface could be found or created for this proxy.
            SkASSERT(!cur->proxy()->isInstantiated());
            *outError = AssignError::kFailedProxyInstantiation;
        }

        fActiveIntvls.insertByIncreasingEnd(cur);

        if (fResourceProvider->overBudget()) {
            // Only force intermediate draws on opsTask boundaries
            if (this->onOpsTaskBoundary()) {
                this->forceIntermediateFlush(stopIndex);
                return true;
            }
        }
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return true;
}
Robert Phillips715d08c2018-07-18 13:56:48 -0400433
#if GR_ALLOCATION_SPEW
// Debug aid: print every pending interval (proxy ID, backing-surface ID, op
// range, ref counts), then draw an ASCII Gantt chart of the usage intervals.
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        // -1 in the second column means the proxy has no backing surface yet.
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - refProxys:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the useage intervals
    for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        // One column per op index; 'x' marks ops where this interval is live.
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif