/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "src/gpu/GrResourceAllocator.h"
Robert Phillips5af44de2017-07-18 14:49:38 -04009
Mike Kleinc0bd9f92019-04-23 12:05:21 -050010#include "src/gpu/GrGpuResourcePriv.h"
Greg Danielf41b2bd2019-08-22 16:19:24 -040011#include "src/gpu/GrOpsTask.h"
Greg Danielf91aeb22019-06-18 09:58:02 -040012#include "src/gpu/GrRenderTargetProxy.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050013#include "src/gpu/GrResourceProvider.h"
Greg Danielf91aeb22019-06-18 09:58:02 -040014#include "src/gpu/GrSurfaceProxy.h"
Mike Kleinc0bd9f92019-04-23 12:05:21 -050015#include "src/gpu/GrSurfaceProxyPriv.h"
Robert Phillips5af44de2017-07-18 14:49:38 -040016
#ifdef SK_DEBUG
#include <atomic>

// Debug-only: hand out a process-wide unique, never-invalid ID for an Interval.
uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> gNextID{1};
    uint32_t candidate = gNextID.fetch_add(1, std::memory_order_relaxed);
    // If the counter ever wraps onto the reserved invalid value, skip past it.
    while (candidate == SK_InvalidUniqueID) {
        candidate = gNextID.fetch_add(1, std::memory_order_relaxed);
    }
    return candidate;
}

// Debug-only: hand out a process-wide unique, never-invalid ID for a Register.
// (Separate counter from Interval's on purpose — the ID spaces are independent.)
uint32_t GrResourceAllocator::Register::CreateUniqueID() {
    static std::atomic<uint32_t> gNextID{1};
    uint32_t candidate = gNextID.fetch_add(1, std::memory_order_relaxed);
    while (candidate == SK_InvalidUniqueID) {
        candidate = gNextID.fetch_add(1, std::memory_order_relaxed);
    }
    return candidate;
}
#endif
38
// By destruction time assign() must have run to completion: every interval was
// drained from the pending and active lists and the proxy->interval hash was reset.
GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fIntvlList.empty());    // no un-assigned intervals left
    SkASSERT(fActiveIntvls.empty()); // no intervals still in flight
    SkASSERT(!fIntvlHash.count());   // hash was reset at the top of assign()
}
44
// Records that 'proxy' is used over the op range [start, end]. If the proxy already
// has an interval the existing one is extended; otherwise a new interval is created
// and inserted (sorted by increasing start). Read-only and skippable proxies never
// get intervals. 'actualUse' distinguishes real uses (counted for recycling
// decisions) from bookkeeping-only mentions.
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);      // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(fResourceProvider)) {
            // Lazy instantiation can fail; remember it so assign() reports failure.
            fFailedInstantiation = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign(). So
            // it must already be instantiated or it must be a lazy proxy that we instantiated above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    uint32_t proxyID = proxy->uniqueID().asUInt();
    if (Interval** intvlPtr = fIntvlHash.find(proxyID)) {
        // Revise the interval for an existing use
        Interval* intvl = *intvlPtr;
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTasks' list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            // Normal case: uses arrive in increasing op order, so the new range
            // must begin at or after the interval's current end.
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        intvl->extendEnd(end);
        return;
    }
    // First use of this proxy: allocate a fresh interval from the arena.
    Interval* newIntvl = fInternalAllocator.make<Interval>(proxy, start, end);

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.set(proxyID, newIntvl);
}
100
Adlai Holler7df8d222021-03-19 12:27:49 -0400101bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
102 GrSurfaceProxy* proxy,
103 int knownUseCount) const {
104 if (!caps.reuseScratchTextures() && !proxy->asRenderTargetProxy()) {
105 // Tragically, scratch texture reuse is totally disabled in this case.
106 return false;
107 }
108
109 if (!this->scratchKey().isValid()) {
110 return false; // no scratch key, no free pool
111 }
112 if (this->uniqueKey().isValid()) {
113 return false; // rely on the resource cache to hold onto uniquely-keyed surfaces.
114 }
115 // If all the refs on the proxy are known to the resource allocator then no one
Adlai Holler1143b1b2021-03-16 13:07:40 -0400116 // should be holding onto it outside of Ganesh.
Adlai Holler7df8d222021-03-19 12:27:49 -0400117 return !proxy->refCntGreaterThan(knownUseCount);
118}
119
// Attaches a GrSurface to 'proxy' for this register. Resolution order:
//   1) a cached surface found via the proxy's unique key,
//   2) a newly created surface (only for the register's originating proxy),
//   3) the surface already held by the originating proxy (register sharing).
// Also reconciles budgeting and propagates the unique key onto the surface.
// Returns false if no surface could be obtained.
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
                                                       GrResourceProvider* resourceProvider) {
    SkASSERT(!proxy->peekSurface());

    sk_sp<GrSurface> surface;
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        // Uniquely-keyed proxies never share a register, so the keys must agree.
        SkASSERT(uniqueKey == fOriginatingProxy->getUniqueKey());
        // First try to reattach to a cached surface if the proxy is uniquely keyed
        surface = resourceProvider->findByUniqueKey<GrSurface>(uniqueKey);
    }
    if (!surface) {
        if (proxy == fOriginatingProxy) {
            // This register's first proxy creates the actual surface.
            surface = proxy->priv().createSurface(resourceProvider);
        } else {
            // Later proxies sharing the register piggyback on the surface the
            // originating proxy already instantiated.
            surface = sk_ref_sp(fOriginatingProxy->peekSurface());
        }
    }
    if (!surface) {
        return false;
    }

    // Make surface budgeted if this proxy is budgeted.
    if (SkBudgeted::kYes == proxy->isBudgeted() &&
        GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
        // This gets the job done but isn't quite correct. It would be better to try to
        // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
        surface->resourcePriv().makeBudgeted();
    }

    // Propagate the proxy unique key to the surface if we have one.
    if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
        if (!surface->getUniqueKey().isValid()) {
            resourceProvider->assignUniqueKeyToResource(uniqueKey, surface.get());
        }
        SkASSERT(surface->getUniqueKey() == uniqueKey);
    }
    proxy->priv().assign(std::move(surface));
    return true;
}
159
Robert Phillips5af44de2017-07-18 14:49:38 -0400160GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400161 SkDEBUGCODE(this->validate());
162
Robert Phillips5af44de2017-07-18 14:49:38 -0400163 Interval* temp = fHead;
164 if (temp) {
Robert Phillipsf8e25022017-11-08 15:24:31 -0500165 fHead = temp->next();
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400166 if (!fHead) {
167 fTail = nullptr;
168 }
169 temp->setNext(nullptr);
Robert Phillips5af44de2017-07-18 14:49:38 -0400170 }
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400171
172 SkDEBUGCODE(this->validate());
Robert Phillips5af44de2017-07-18 14:49:38 -0400173 return temp;
174}
175
176// TODO: fuse this with insertByIncreasingEnd
177void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400178 SkDEBUGCODE(this->validate());
179 SkASSERT(!intvl->next());
180
Robert Phillips5af44de2017-07-18 14:49:38 -0400181 if (!fHead) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400182 // 14%
183 fHead = fTail = intvl;
Robert Phillipsf8e25022017-11-08 15:24:31 -0500184 } else if (intvl->start() <= fHead->start()) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400185 // 3%
Robert Phillipsf8e25022017-11-08 15:24:31 -0500186 intvl->setNext(fHead);
Robert Phillips5af44de2017-07-18 14:49:38 -0400187 fHead = intvl;
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400188 } else if (fTail->start() <= intvl->start()) {
189 // 83%
190 fTail->setNext(intvl);
191 fTail = intvl;
Robert Phillips5af44de2017-07-18 14:49:38 -0400192 } else {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400193 // almost never
Robert Phillips5af44de2017-07-18 14:49:38 -0400194 Interval* prev = fHead;
Robert Phillipsf8e25022017-11-08 15:24:31 -0500195 Interval* next = prev->next();
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400196 for (; intvl->start() > next->start(); prev = next, next = next->next()) {
Robert Phillips5af44de2017-07-18 14:49:38 -0400197 }
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400198
199 SkASSERT(next);
Robert Phillipsf8e25022017-11-08 15:24:31 -0500200 intvl->setNext(next);
201 prev->setNext(intvl);
Robert Phillips5af44de2017-07-18 14:49:38 -0400202 }
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400203
204 SkDEBUGCODE(this->validate());
Robert Phillips5af44de2017-07-18 14:49:38 -0400205}
206
207// TODO: fuse this with insertByIncreasingStart
208void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400209 SkDEBUGCODE(this->validate());
210 SkASSERT(!intvl->next());
211
Robert Phillips5af44de2017-07-18 14:49:38 -0400212 if (!fHead) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400213 // 14%
214 fHead = fTail = intvl;
Robert Phillipsf8e25022017-11-08 15:24:31 -0500215 } else if (intvl->end() <= fHead->end()) {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400216 // 64%
Robert Phillipsf8e25022017-11-08 15:24:31 -0500217 intvl->setNext(fHead);
Robert Phillips5af44de2017-07-18 14:49:38 -0400218 fHead = intvl;
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400219 } else if (fTail->end() <= intvl->end()) {
220 // 3%
221 fTail->setNext(intvl);
222 fTail = intvl;
Robert Phillips5af44de2017-07-18 14:49:38 -0400223 } else {
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400224 // 19% but 81% of those land right after the list's head
Robert Phillips5af44de2017-07-18 14:49:38 -0400225 Interval* prev = fHead;
Robert Phillipsf8e25022017-11-08 15:24:31 -0500226 Interval* next = prev->next();
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400227 for (; intvl->end() > next->end(); prev = next, next = next->next()) {
Robert Phillips5af44de2017-07-18 14:49:38 -0400228 }
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400229
230 SkASSERT(next);
Robert Phillipsf8e25022017-11-08 15:24:31 -0500231 intvl->setNext(next);
232 prev->setNext(intvl);
Robert Phillips5af44de2017-07-18 14:49:38 -0400233 }
Robert Phillipsdf25e3a2018-08-08 12:48:40 -0400234
235 SkDEBUGCODE(this->validate());
Robert Phillips5af44de2017-07-18 14:49:38 -0400236}
237
#ifdef SK_DEBUG
// Checks the list's structural invariants: head and tail are both null (empty)
// or both non-null, and following next pointers from head ends exactly at tail.
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* last = nullptr;
    Interval* node = fHead;
    while (node) {
        last = node;
        node = node->next();
    }

    SkASSERT(fTail == last);
}
#endif
Robert Phillips4150eea2018-02-07 17:08:21 -0500249
Adlai Holler4cfbe532021-03-17 10:36:39 -0400250// First try to reuse one of the recently allocated/used registers in the free pool.
Adlai Holler7df8d222021-03-19 12:27:49 -0400251GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
252 // Handle uniquely keyed proxies
Adlai Hollercc119d92021-03-16 15:17:25 -0400253 if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
Adlai Holler7df8d222021-03-19 12:27:49 -0400254 if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
255 return *p;
Robert Phillips0790f8a2018-09-18 13:11:03 -0400256 }
Adlai Holler7df8d222021-03-19 12:27:49 -0400257 // No need for a scratch key. These don't go in the free pool.
258 Register* r = fInternalAllocator.make<Register>(proxy, GrScratchKey());
259 fUniqueKeyRegisters.set(uniqueKey, r);
260 return r;
Robert Phillips0790f8a2018-09-18 13:11:03 -0400261 }
262
Adlai Hollercc119d92021-03-16 15:17:25 -0400263 // Then look in the free pool
Adlai Holler7df8d222021-03-19 12:27:49 -0400264 GrScratchKey scratchKey;
265 proxy->priv().computeScratchKey(*fResourceProvider->caps(), &scratchKey);
Robert Phillips57aa3672017-07-21 11:38:13 -0400266
Adlai Holler4cfbe532021-03-17 10:36:39 -0400267 auto filter = [] (const Register* r) {
Robert Phillips10d17212019-04-24 14:09:10 -0400268 return true;
Robert Phillipsf8e25022017-11-08 15:24:31 -0500269 };
Adlai Holler7df8d222021-03-19 12:27:49 -0400270 if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
Adlai Holler4cfbe532021-03-17 10:36:39 -0400271 return r;
Robert Phillips57aa3672017-07-21 11:38:13 -0400272 }
273
Adlai Holler7df8d222021-03-19 12:27:49 -0400274 return fInternalAllocator.make<Register>(proxy, std::move(scratchKey));
Robert Phillips5af44de2017-07-18 14:49:38 -0400275}
276
// Remove any intervals that end before the current index. Add their registers
// to the free pool if possible. Relies on fActiveIntvls being sorted by
// increasing end(), so expired intervals are always at the head.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* intvl = fActiveIntvls.popHead();
        SkASSERT(!intvl->next());

        // A register is only present for proxies assign() routed through
        // findOrCreateRegisterFor (i.e. not already-instantiated/lazy proxies).
        Register* r = intvl->getRegister();
        if (r && r->isRecyclable(*fResourceProvider->caps(), intvl->proxy(), intvl->uses())) {
#if GR_ALLOCATION_SPEW
            SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
            // TODO: fix this insertion so we get a more LRU-ish behavior
            fFreePool.insert(r->scratchKey(), r);
        }
        // Keep the interval around for the instantiation pass at the end of assign().
        fFinishedIntvls.insertByIncreasingStart(intvl);
    }
}
295
// Runs the register-allocation-style assignment over all recorded intervals:
// phase 1 sweeps intervals in increasing start order, expiring finished ones
// (recycling their registers into the free pool) and assigning a register to
// each proxy that needs one; phase 2 instantiates an actual GrSurface for every
// finished interval. Returns false if any instantiation (including earlier lazy
// instantiation in addInterval) failed.
bool GrResourceAllocator::assign() {
    fIntvlHash.reset(); // we don't need the interval hash anymore

    SkDEBUGCODE(fAssigned = true;)

    if (fIntvlList.empty()) {
        // Still report any failure recorded while adding intervals.
        return !fFailedInstantiation;          // no resources to assign
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning %d ops\n", fNumOps);
    this->dumpIntervals();
#endif

    // Phase 1: linear-scan register assignment over intervals sorted by start.
    while (Interval* cur = fIntvlList.popHead()) {
        // Recycle registers of every interval that ended before this one starts.
        this->expire(cur->start());

        // Already-instantiated proxies and lazy proxies don't use registers.
        // No need to compute scratch keys (or CANT, in the case of fully-lazy).
        if (cur->proxy()->isInstantiated() || cur->proxy()->isLazy()) {
            fActiveIntvls.insertByIncreasingEnd(cur);

            continue;
        }

        Register* r = this->findOrCreateRegisterFor(cur->proxy());
#if GR_ALLOCATION_SPEW
        SkDebugf("Assigning register %d to %d\n",
                 r->uniqueID(),
                 cur->proxy()->uniqueID().asUInt());
#endif
        SkASSERT(!cur->proxy()->peekSurface());
        cur->setRegister(r);

        fActiveIntvls.insertByIncreasingEnd(cur);
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());

    // TODO: Return here and give the caller a chance to estimate memory cost and bail before
    // instantiating anything.

    // Phase 2: instantiate surfaces for the finished intervals. Stop at the
    // first failure; the flag is sticky and reported to the caller.
    while (Interval* cur = fFinishedIntvls.popHead()) {
        if (fFailedInstantiation) {
            break;
        }
        if (cur->proxy()->isInstantiated()) {
            continue;
        }
        if (cur->proxy()->isLazy()) {
            // Lazy proxies instantiate themselves; they have no register.
            fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(fResourceProvider);
            continue;
        }
        Register* r = cur->getRegister();
        SkASSERT(r);
        fFailedInstantiation = !r->instantiateSurface(cur->proxy(), fResourceProvider);
    }
    return !fFailedInstantiation;
}
Robert Phillips715d08c2018-07-18 13:56:48 -0400357
#if GR_ALLOCATION_SPEW
// Debug spew: prints every pending interval (proxy ID, backing surface ID, op
// range, ref counts) and then an ASCII Gantt-style chart of interval usage.
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their overall [min, max] op range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        // -1 in the second column means the proxy has no backing surface yet.
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - refProxys:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals: one row per interval, an 'x' for
    // each op index where the interval is live.
    for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif