Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2017 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
| 8 | #include "GrResourceAllocator.h" |
| 9 | |
Brian Salomon | 967df20 | 2018-12-07 11:15:53 -0500 | [diff] [blame] | 10 | #include "GrDeinstantiateProxyTracker.h" |
Robert Phillips | f8e2502 | 2017-11-08 15:24:31 -0500 | [diff] [blame] | 11 | #include "GrGpuResourcePriv.h" |
Robert Phillips | 5b65a84 | 2017-11-13 15:48:12 -0500 | [diff] [blame] | 12 | #include "GrOpList.h" |
Robert Phillips | eafd48a | 2017-11-16 07:52:08 -0500 | [diff] [blame] | 13 | #include "GrRenderTargetProxy.h" |
| 14 | #include "GrResourceCache.h" |
Robert Phillips | f8e2502 | 2017-11-08 15:24:31 -0500 | [diff] [blame] | 15 | #include "GrResourceProvider.h" |
| 16 | #include "GrSurfacePriv.h" |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 17 | #include "GrSurfaceProxy.h" |
| 18 | #include "GrSurfaceProxyPriv.h" |
Robert Phillips | f8e2502 | 2017-11-08 15:24:31 -0500 | [diff] [blame] | 19 | #include "GrTextureProxy.h" |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 20 | |
#if GR_TRACK_INTERVAL_CREATION
#include <atomic>

// Hands out process-wide unique IDs for debugging/tracking of Interval
// creation. Skips SK_InvalidUniqueID so an Interval's ID is always valid.
uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    for (;;) {
        uint32_t candidate = nextID.fetch_add(1);
        if (candidate != SK_InvalidUniqueID) {
            return candidate;
        }
    }
}
#endif
| 33 | |
// Binds 'surface' to this interval's proxy. The interval keeps its own ref
// (fAssignedSurface) so the surface can be recycled into the free pool when
// the interval expires, even after the proxy lets go of it.
void GrResourceAllocator::Interval::assign(sk_sp<GrSurface> s) {
    SkASSERT(!fAssignedSurface);   // an interval is only ever assigned once
    fAssignedSurface = s;          // deliberate copy: interval retains a ref
    fProxy->priv().assign(std::move(s));
}
| 39 | |
Robert Phillips | eafd48a | 2017-11-16 07:52:08 -0500 | [diff] [blame] | 40 | |
// Records the op-index boundary of the opList at 'opListIndex'. The recorded
// value is actually the first op index of the NEXT opList; assign() uses these
// boundaries to only interrupt allocation between (not within) opLists.
void GrResourceAllocator::markEndOfOpList(int opListIndex) {
    SkASSERT(!fAssigned);      // We shouldn't be adding any opLists after (or during) assignment

    // opLists must be marked in order, exactly once each
    SkASSERT(fEndOfOpListOpIndices.count() == opListIndex);
    if (!fEndOfOpListOpIndices.empty()) {
        // boundaries must be strictly increasing
        SkASSERT(fEndOfOpListOpIndices.back() < this->curOp());
    }

    fEndOfOpListOpIndices.push_back(this->curOp()); // This is the first op index of the next opList
}
| 51 | |
// By destruction time assign() must have fully drained all bookkeeping:
// no pending intervals, no active intervals, and an empty interval hash.
GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}
| 57 | |
// Adds (or extends) the usage interval [start, end] for 'proxy'. Intervals
// drive the surface assignment performed later in assign(). Read-only proxies
// get no interval; proxies whose interval already exists have it extended.
// 'isDirectDstRead' (debug only) marks reads from the render target itself,
// which must land inside the existing interval.
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned);      // We shouldn't be adding any intervals after (or during) assignment

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        // Since we aren't going to add an interval we won't revisit this proxy in assign(). So it
        // must already be instantiated or it must be a lazy proxy that we will instantiate below.
        SkASSERT(proxy->isInstantiated() ||
                 GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState());
    } else {
        if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
            // Revise the interval for an existing use
#ifdef SK_DEBUG
            if (0 == start && 0 == end) {
                // This interval is for the initial upload to a deferred proxy. Due to the vagaries
                // of how deferred proxies are collected they can appear as uploads multiple times
                // in a single opLists' list and as uploads in several opLists.
                SkASSERT(0 == intvl->start());
            } else if (isDirectDstRead) {
                // Direct reads from the render target itself should occur w/in the existing
                // interval
                SkASSERT(intvl->start() <= start && intvl->end() >= end);
            } else {
                // Otherwise the new use must begin at or after the current interval's end
                SkASSERT(intvl->end() <= start && intvl->end() <= end);
            }
#endif
            intvl->extendEnd(end);
            return;
        }
        // New interval: recycle one off the free list if possible, otherwise
        // arena-allocate a fresh one.
        Interval* newIntvl;
        if (fFreeIntervalList) {
            newIntvl = fFreeIntervalList;
            fFreeIntervalList = newIntvl->next();
            newIntvl->setNext(nullptr);
            newIntvl->resetTo(proxy, start, end);
        } else {
            newIntvl = fIntervalAllocator.make<Interval>(proxy, start, end);
        }

        fIntvlList.insertByIncreasingStart(newIntvl);
        fIntvlHash.add(newIntvl);
    }

    // Because readOnly proxies do not get a usage interval we must instantiate them here (since it
    // won't occur in GrResourceAllocator::assign)
    if (proxy->readOnly() || !fResourceProvider->explicitlyAllocateGPUResources()) {
        // FIXME: remove this once we can do the lazy instantiation from assign instead.
        if (GrSurfaceProxy::LazyState::kNot != proxy->lazyInstantiationState()) {
            if (proxy->priv().doLazyInstantiation(fResourceProvider)) {
                // Deinstantiate-type lazy proxies must be tracked so they can be
                // deinstantiated after the flush.
                if (proxy->priv().lazyInstantiationType() ==
                    GrSurfaceProxy::LazyInstantiationType::kDeinstantiate) {
                    fDeinstantiateTracker->addProxy(proxy);
                }
            }
        }
    }
}
| 119 | |
| 120 | GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() { |
Robert Phillips | df25e3a | 2018-08-08 12:48:40 -0400 | [diff] [blame] | 121 | SkDEBUGCODE(this->validate()); |
| 122 | |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 123 | Interval* temp = fHead; |
| 124 | if (temp) { |
Robert Phillips | f8e2502 | 2017-11-08 15:24:31 -0500 | [diff] [blame] | 125 | fHead = temp->next(); |
Robert Phillips | df25e3a | 2018-08-08 12:48:40 -0400 | [diff] [blame] | 126 | if (!fHead) { |
| 127 | fTail = nullptr; |
| 128 | } |
| 129 | temp->setNext(nullptr); |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 130 | } |
Robert Phillips | df25e3a | 2018-08-08 12:48:40 -0400 | [diff] [blame] | 131 | |
| 132 | SkDEBUGCODE(this->validate()); |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 133 | return temp; |
| 134 | } |
| 135 | |
| 136 | // TODO: fuse this with insertByIncreasingEnd |
// TODO: fuse this with insertByIncreasingEnd
// Inserts 'intvl' keeping the list sorted by increasing start index.
// The percentage comments record observed hit rates for each case and
// motivate checking the (cheap) head/tail cases before the linear scan.
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());   // must be detached before insertion

    if (!fHead) {
        // 14% -- empty list
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3% -- new head
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83% -- append at tail (the common case)
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never -- linear scan for the insertion point
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        // the tail case above guarantees the scan stops before falling off the end
        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}
| 166 | |
| 167 | // TODO: fuse this with insertByIncreasingStart |
// TODO: fuse this with insertByIncreasingStart
// Inserts 'intvl' keeping the list sorted by increasing end index (used for
// the active-interval list, which expire() drains in end order).
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());   // must be detached before insertion

    if (!fHead) {
        // 14% -- empty list
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64% -- new head (the common case here)
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3% -- append at tail
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        // the tail case above guarantees the scan stops before falling off the end
        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}
| 197 | |
#ifdef SK_DEBUG
// Debug-only invariant check: fHead/fTail are both set or both null, and
// fTail points at the final node reachable from fHead.
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* last = nullptr;
    for (Interval* node = fHead; node; node = node->next()) {
        last = node;
    }

    SkASSERT(fTail == last);
}
#endif
Robert Phillips | 4150eea | 2018-02-07 17:08:21 -0500 | [diff] [blame] | 209 | |
| 210 | GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::detachAll() { |
| 211 | Interval* tmp = fHead; |
| 212 | fHead = nullptr; |
Robert Phillips | df25e3a | 2018-08-08 12:48:40 -0400 | [diff] [blame] | 213 | fTail = nullptr; |
Robert Phillips | 4150eea | 2018-02-07 17:08:21 -0500 | [diff] [blame] | 214 | return tmp; |
| 215 | } |
| 216 | |
Robert Phillips | 5af44de | 2017-07-18 14:49:38 -0400 | [diff] [blame] | 217 | // 'surface' can be reused. Add it back to the free pool. |
// 'surface' can be reused. Add it back to the free pool.
void GrResourceAllocator::recycleSurface(sk_sp<GrSurface> surface) {
    const GrScratchKey &key = surface->resourcePriv().getScratchKey();

    if (!key.isValid()) {
        return; // can't do it w/o a valid scratch key
    }

    if (surface->getUniqueKey().isValid()) {
        // If the surface has a unique key we throw it back into the resource cache.
        // If things get really tight 'findSurfaceFor' may pull it back out but there is
        // no need to have it in tight rotation.
        return;
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("putting surface %d back into pool\n", surface->uniqueID().asUInt());
#endif
    // TODO: fix this insertion so we get a more LRU-ish behavior
    // fFreePool takes ownership of the raw pointer released here.
    fFreePool.insert(key, surface.release());
}
| 238 | |
| 239 | // First try to reuse one of the recently allocated/used GrSurfaces in the free pool. |
| 240 | // If we can't find a useable one, create a new one. |
// First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
// If we can't find a useable one, create a new one.
// Lookup order: unique-key match in the resource cache, then scratch-key match
// in the free pool, then a freshly created surface. Returns nullptr on failure
// (including failure to attach a required stencil buffer).
sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(const GrSurfaceProxy* proxy,
                                                     bool needsStencil) {

    if (proxy->asTextureProxy() && proxy->asTextureProxy()->getUniqueKey().isValid()) {
        // First try to reattach to a cached version if the proxy is uniquely keyed
        sk_sp<GrSurface> surface = fResourceProvider->findByUniqueKey<GrSurface>(
                                                        proxy->asTextureProxy()->getUniqueKey());
        if (surface) {
            if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(),
                                                           needsStencil)) {
                return nullptr;
            }

            return surface;
        }
    }

    // First look in the free pool
    GrScratchKey key;

    proxy->priv().computeScratchKey(&key);

    // Reject pooled surfaces with pending IO when the proxy demands none.
    auto filter = [&] (const GrSurface* s) {
        return !proxy->priv().requiresNoPendingIO() || !s->surfacePriv().hasPendingIO();
    };
    sk_sp<GrSurface> surface(fFreePool.findAndRemove(key, filter));
    if (surface) {
        if (SkBudgeted::kYes == proxy->isBudgeted() &&
            GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
            // This gets the job done but isn't quite correct. It would be better to try to
            // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
            surface->resourcePriv().makeBudgeted();
        }

        if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(),
                                                       needsStencil)) {
            return nullptr;
        }
        // uniquely-keyed surfaces never enter the free pool (see recycleSurface)
        SkASSERT(!surface->getUniqueKey().isValid());
        return surface;
    }

    // Failing that, try to grab a new one from the resource cache
    return proxy->priv().createSurface(fResourceProvider);
}
| 286 | |
| 287 | // Remove any intervals that end before the current index. Return their GrSurfaces |
| 288 | // to the free pool. |
// Remove any intervals that end before the current index. Return their GrSurfaces
// to the free pool.
// Relies on fActiveIntvls being sorted by increasing end index so it can stop
// at the first interval that is still live.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* temp = fActiveIntvls.popHead();
        SkASSERT(!temp->next());   // popHead detaches the node

        if (temp->wasAssignedSurface()) {
            sk_sp<GrSurface> surface = temp->detachSurface();

            // If the proxy has an actual live ref on it that means someone wants to retain its
            // contents. In that case we cannot recycle it (until the external holder lets
            // go of it).
            if (0 == temp->proxy()->priv().getProxyRefCnt()) {
                this->recycleSurface(std::move(surface));
            }
        }

        // Add temp to the free interval list so it can be reused
        SkASSERT(!temp->wasAssignedSurface()); // it had better not have a ref on a surface
        temp->setNext(fFreeIntervalList);
        fFreeIntervalList = temp;
    }
}
| 311 | |
// Walks the pending intervals in start order and instantiates each proxy:
// already-instantiated proxies just get a stencil check; lazy proxies are
// lazily instantiated; everything else gets a surface via findSurfaceFor().
// Returns false if there was nothing to assign. On an over-budget condition
// the walk is cut short at an opList boundary and [*startIndex, *stopIndex)
// identifies the opLists that can be flushed now; 'outError' reports any
// proxy-instantiation failure encountered along the way.
bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
    SkASSERT(outError);
    *outError = AssignError::kNoError;

    fIntvlHash.reset(); // we don't need the interval hash anymore
    if (fIntvlList.empty()) {
        return false;          // nothing to render
    }

    *startIndex = fCurOpListIndex;
    *stopIndex = fEndOfOpListOpIndices.count();

    if (!fResourceProvider->explicitlyAllocateGPUResources()) {
        fIntvlList.detachAll(); // arena allocator will clean these up for us
        return true;
    }

    SkDEBUGCODE(fAssigned = true;)

#if GR_ALLOCATION_SPEW
    this->dumpIntervals();
#endif
    while (Interval* cur = fIntvlList.popHead()) {
        // advance the opList cursor when this interval starts in a later opList
        if (fEndOfOpListOpIndices[fCurOpListIndex] < cur->start()) {
            fCurOpListIndex++;
        }

        // recycle any surfaces whose intervals ended before this one starts
        this->expire(cur->start());

        bool needsStencil = cur->proxy()->asRenderTargetProxy()
                                            ? cur->proxy()->asRenderTargetProxy()->needsStencil()
                                            : false;

        if (cur->proxy()->isInstantiated()) {
            // already backed by a surface; just make sure the stencil buffer is there
            if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(
                        fResourceProvider, cur->proxy()->peekSurface(), needsStencil)) {
                *outError = AssignError::kFailedProxyInstantiation;
            }

            fActiveIntvls.insertByIncreasingEnd(cur);

            if (fResourceProvider->overBudget()) {
                // Only force intermediate draws on opList boundaries
                if (!fIntvlList.empty() &&
                    fEndOfOpListOpIndices[fCurOpListIndex] < fIntvlList.peekHead()->start()) {
                    *stopIndex = fCurOpListIndex+1;

                    // This is interrupting the allocation of resources for this flush. We need to
                    // proactively clear the active interval list of any intervals that aren't
                    // guaranteed to survive the partial flush lest they become zombies (i.e.,
                    // holding a deleted surface proxy).
                    if (const Interval* tmp = fIntvlList.peekHead()) {
                        this->expire(tmp->start());
                    } else {
                        this->expire(std::numeric_limits<unsigned int>::max());
                    }
                    return true;
                }
            }

            continue;
        }

        if (GrSurfaceProxy::LazyState::kNot != cur->proxy()->lazyInstantiationState()) {
            // lazy proxy: let it create/fetch its own surface
            if (!cur->proxy()->priv().doLazyInstantiation(fResourceProvider)) {
                *outError = AssignError::kFailedProxyInstantiation;
            } else {
                // deinstantiate-type lazy proxies are released again after the flush
                if (GrSurfaceProxy::LazyInstantiationType::kDeinstantiate ==
                    cur->proxy()->priv().lazyInstantiationType()) {
                    fDeinstantiateTracker->addProxy(cur->proxy());
                }
            }
        } else if (sk_sp<GrSurface> surface = this->findSurfaceFor(cur->proxy(), needsStencil)) {
            // TODO: make getUniqueKey virtual on GrSurfaceProxy
            GrTextureProxy* texProxy = cur->proxy()->asTextureProxy();

            if (texProxy && texProxy->getUniqueKey().isValid()) {
                // propagate the proxy's unique key onto the surface it received
                if (!surface->getUniqueKey().isValid()) {
                    fResourceProvider->assignUniqueKeyToResource(texProxy->getUniqueKey(),
                                                                 surface.get());
                }
                SkASSERT(surface->getUniqueKey() == texProxy->getUniqueKey());
            }

#if GR_ALLOCATION_SPEW
            SkDebugf("Assigning %d to %d\n",
                 surface->uniqueID().asUInt(),
                 cur->proxy()->uniqueID().asUInt());
#endif

            cur->assign(std::move(surface));
        } else {
            SkASSERT(!cur->proxy()->isInstantiated());
            *outError = AssignError::kFailedProxyInstantiation;
        }

        fActiveIntvls.insertByIncreasingEnd(cur);

        if (fResourceProvider->overBudget()) {
            // Only force intermediate draws on opList boundaries
            if (!fIntvlList.empty() &&
                fEndOfOpListOpIndices[fCurOpListIndex] < fIntvlList.peekHead()->start()) {
                *stopIndex = fCurOpListIndex+1;

                // This is interrupting the allocation of resources for this flush. We need to
                // proactively clear the active interval list of any intervals that aren't
                // guaranteed to survive the partial flush lest they become zombies (i.e.,
                // holding a deleted surface proxy).
                if (const Interval* tmp = fIntvlList.peekHead()) {
                    this->expire(tmp->start());
                } else {
                    this->expire(std::numeric_limits<unsigned int>::max());
                }
                return true;
            }
        }
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return true;
}
Robert Phillips | 715d08c | 2018-07-18 13:56:48 -0400 | [diff] [blame] | 434 | |
#if GR_ALLOCATION_SPEW
// Debug-only: prints every pending interval (with ref counts) and then an
// ASCII Gantt-style chart of interval usage over the op-index range.
void GrResourceAllocator::dumpIntervals() {

    // Print all the intervals while computing their range
    unsigned int min = fNumOps+1;
    unsigned int max = 0;
    for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d R:%d W:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 // -1 marks a proxy with no backing surface yet
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->getBackingRefCnt_TestOnly(),
                 cur->proxy()->getPendingReadCnt_TestOnly(),
                 cur->proxy()->getPendingWriteCnt_TestOnly());
        min = SkTMin(min, cur->start());
        max = SkTMax(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif