Track the number of resources that would become purgeable after flush
in GrResourceCache.
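
The cache now tracks fNumBudgetedResourcesFlushWillMakePurgeable: the
number of budgeted resources that have no refs and are kept
non-purgeable only by pending IO, i.e. resources that a flush will make
purgeable. The count is updated when resources are ref'ed, when their
ref/IO counts reach zero, and when their budgeted status changes, and it
is cross-checked in validate().

To keep the count accurate, a ref that takes a resource from zero refs
to one now goes through the cache: GrGpuResource::ProxyAccess::ref()
takes a GrResourceCache* and calls the new
GrResourceCache::refResource(), and GrSurfaceProxy::addInitialRef(),
GrDeinstantiateProxyTracker, and GrOpFlushState are plumbed to pass the
cache along. GrSurfaceProxy::transferRefs() now transfers pending IO
before adjusting the target's ref count and uses unref() when the proxy
holds no refs of its own, so the proper cache notifications fire. Also
moves GrResourceCache's member initialization to in-class initializers.

Roughly, the new counter maintains the invariant sketched below (this
mirrors the check added to GrResourceCache::validate(); "nonpurgeable"
stands in for the cache's fNonpurgeableResources array):

  int count = 0;
  for (GrGpuResource* r : nonpurgeable) {
      if (r->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
          !r->cacheAccess().hasRef()) {
          ++count;  // no refs and not purgeable => only pending IO holds it
      }
  }
  SkASSERT(count == fNumBudgetedResourcesFlushWillMakePurgeable);
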
Bug: skia:8927
Change-Id: Ia00ba0ea541a22e29e9a8208044e1fabd5296782
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/205484
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Brian Salomon <bsalomon@google.com>
diff --git a/include/gpu/GrGpuResource.h b/include/gpu/GrGpuResource.h
index 3a165d3..948d17d 100644
--- a/include/gpu/GrGpuResource.h
+++ b/include/gpu/GrGpuResource.h
@@ -308,6 +308,7 @@
private:
bool isPurgeable() const;
+ bool hasRef() const;
bool hasRefOrPendingIO() const;
/**
@@ -380,7 +381,7 @@
ProxyAccess(GrGpuResource* resource) : fResource(resource) {}
/** Proxies are allowed to take a resource from no refs to one ref. */
- void ref() { fResource->addInitialRef(); }
+ void ref(GrResourceCache* cache);
// No taking addresses of this type.
const CacheAccess* operator&() const = delete;
diff --git a/include/private/GrSurfaceProxy.h b/include/private/GrSurfaceProxy.h
index 57dfaed..aeb48f2 100644
--- a/include/private/GrSurfaceProxy.h
+++ b/include/private/GrSurfaceProxy.h
@@ -152,11 +152,11 @@
}
// Privileged method that allows going from ref count = 0 to ref count = 1.
- void addInitialRef() const {
+ void addInitialRef(GrResourceCache* cache) const {
this->validate();
++fRefCnt;
if (fTarget) {
- fTarget->proxyAccess().ref();
+ fTarget->proxyAccess().ref(cache);
}
}
@@ -165,11 +165,22 @@
// refs & unrefs to the new GrSurface
void transferRefs() {
SkASSERT(fTarget);
+ // Make sure we're going to take some ownership of our target.
+ SkASSERT(fRefCnt > 0 || fPendingReads > 0 || fPendingWrites > 0);
- SkASSERT(fTarget->fRefCnt > 0);
- fTarget->fRefCnt += (fRefCnt-1); // don't xfer the proxy's creation ref
+ // Transfer pending reads/writes first so that if we decrement the target's ref cnt we don't
+ // cause a purge of the target.
fTarget->fPendingReads += fPendingReads;
fTarget->fPendingWrites += fPendingWrites;
+ SkASSERT(fTarget->fRefCnt > 0);
+ SkASSERT(fRefCnt >= 0);
+ // Don't xfer the proxy's creation ref. If we're going to subtract a ref, do it via unref()
+ // so that proper cache notifications occur.
+ if (!fRefCnt) {
+ fTarget->unref();
+ } else {
+ fTarget->fRefCnt += (fRefCnt - 1);
+ }
}
int32_t internalGetProxyRefCnt() const {
@@ -585,7 +596,7 @@
class GrSurfaceProxy::FirstRefAccess {
private:
- void ref() { fProxy->addInitialRef(); }
+ void ref(GrResourceCache* cache) { fProxy->addInitialRef(cache); }
FirstRefAccess(GrSurfaceProxy* proxy) : fProxy(proxy) {}
diff --git a/src/gpu/GrDeinstantiateProxyTracker.cpp b/src/gpu/GrDeinstantiateProxyTracker.cpp
index 4e65133..06d1e93 100644
--- a/src/gpu/GrDeinstantiateProxyTracker.cpp
+++ b/src/gpu/GrDeinstantiateProxyTracker.cpp
@@ -18,7 +18,7 @@
SkASSERT(proxy != fProxies[i].get());
}
#endif
- proxy->firstRefAccess().ref();
+ proxy->firstRefAccess().ref(fCache);
fProxies.push_back(sk_sp<GrSurfaceProxy>(proxy));
}
diff --git a/src/gpu/GrDeinstantiateProxyTracker.h b/src/gpu/GrDeinstantiateProxyTracker.h
index 2555ab1..f144fc2 100644
--- a/src/gpu/GrDeinstantiateProxyTracker.h
+++ b/src/gpu/GrDeinstantiateProxyTracker.h
@@ -11,9 +11,11 @@
#include "GrSurfaceProxy.h"
#include "SkTArray.h"
+class GrResourceCache;
+
class GrDeinstantiateProxyTracker {
public:
- GrDeinstantiateProxyTracker() {}
+ GrDeinstantiateProxyTracker(GrResourceCache* cache) : fCache(cache) {}
// Adds a proxy which will be deinstantiated at the end of flush. The same proxy may not be
// added multiple times.
@@ -23,6 +25,7 @@
void deinstantiateAllProxies();
private:
+ GrResourceCache* fCache;
SkTArray<sk_sp<GrSurfaceProxy>> fProxies;
};
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index bf17d6c..8de898d 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -238,7 +238,8 @@
fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
}
- GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);
+ GrOpFlushState flushState(gpu, resourceProvider, resourceCache, &fTokenTracker,
+ fCpuBufferCache);
GrOnFlushResourceProvider onFlushProvider(this);
// TODO: AFAICT the only reason fFlushState is on GrDrawingManager rather than on the
diff --git a/src/gpu/GrGpuResource.cpp b/src/gpu/GrGpuResource.cpp
index 6dc0dfa..3d2c197 100644
--- a/src/gpu/GrGpuResource.cpp
+++ b/src/gpu/GrGpuResource.cpp
@@ -104,6 +104,8 @@
return this->internalHasRef() || this->internalHasPendingIO();
}
+bool GrGpuResource::hasRef() const { return this->internalHasRef(); }
+
SkString GrGpuResource::getResourceName() const {
// Dump resource as "skia/gpu_resources/resource_#".
SkString resourceName("skia/gpu_resources/resource_");
@@ -229,3 +231,10 @@
} while (id == SK_InvalidUniqueID);
return id;
}
+
+//////////////////////////////////////////////////////////////////////////////
+
+void GrGpuResource::ProxyAccess::ref(GrResourceCache* cache) {
+ SkASSERT(cache == fResource->getContext()->priv().getResourceCache());
+ cache->resourceAccess().refResource(fResource);
+}
diff --git a/src/gpu/GrGpuResourceCacheAccess.h b/src/gpu/GrGpuResourceCacheAccess.h
index dd01b72..0e1ace0 100644
--- a/src/gpu/GrGpuResourceCacheAccess.h
+++ b/src/gpu/GrGpuResourceCacheAccess.h
@@ -55,6 +55,9 @@
/** Called by the cache to assign a new unique key. */
void setUniqueKey(const GrUniqueKey& key) { fResource->fUniqueKey = key; }
+ /** Is the resource ref'ed (not counting pending IOs). */
+ bool hasRef() const { return fResource->hasRef(); }
+
/** Called by the cache to make the unique key invalid. */
void removeUniqueKey() { fResource->fUniqueKey.reset(); }
diff --git a/src/gpu/GrOpFlushState.cpp b/src/gpu/GrOpFlushState.cpp
index 7a6aa18..3b59782 100644
--- a/src/gpu/GrOpFlushState.cpp
+++ b/src/gpu/GrOpFlushState.cpp
@@ -16,13 +16,14 @@
//////////////////////////////////////////////////////////////////////////////
GrOpFlushState::GrOpFlushState(GrGpu* gpu, GrResourceProvider* resourceProvider,
- GrTokenTracker* tokenTracker,
+ GrResourceCache* cache, GrTokenTracker* tokenTracker,
sk_sp<GrBufferAllocPool::CpuBufferCache> cpuBufferCache)
: fVertexPool(gpu, cpuBufferCache)
, fIndexPool(gpu, std::move(cpuBufferCache))
, fGpu(gpu)
, fResourceProvider(resourceProvider)
- , fTokenTracker(tokenTracker) {}
+ , fTokenTracker(tokenTracker)
+ , fDeinstantiateProxyTracker(cache) {}
const GrCaps& GrOpFlushState::caps() const {
return *fGpu->caps();
diff --git a/src/gpu/GrOpFlushState.h b/src/gpu/GrOpFlushState.h
index 07983d4..3016061 100644
--- a/src/gpu/GrOpFlushState.h
+++ b/src/gpu/GrOpFlushState.h
@@ -29,7 +29,7 @@
// vertexSpace and indexSpace may either be null or an allocation of size
// GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
// vertices/indices when a buffer larger than kDefaultBufferSize is required.
- GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*,
+ GrOpFlushState(GrGpu*, GrResourceProvider*, GrResourceCache*, GrTokenTracker*,
sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);
~GrOpFlushState() final { this->reset(); }
diff --git a/src/gpu/GrProxyProvider.cpp b/src/gpu/GrProxyProvider.cpp
index c831471..f33755d 100644
--- a/src/gpu/GrProxyProvider.cpp
+++ b/src/gpu/GrProxyProvider.cpp
@@ -105,7 +105,11 @@
GrTextureProxy* proxy = fUniquelyKeyedProxies.find(key);
sk_sp<GrTextureProxy> result;
if (proxy) {
- proxy->firstRefAccess().ref();
+ GrResourceCache* cache = nullptr;
+ if (auto directContext = fImageContext->priv().asDirectContext()) {
+ cache = directContext->priv().getResourceCache();
+ }
+ proxy->firstRefAccess().ref(cache);
result.reset(proxy);
SkASSERT(result->origin() == origin);
}
diff --git a/src/gpu/GrResourceCache.cpp b/src/gpu/GrResourceCache.cpp
index 8e91969..3347797 100644
--- a/src/gpu/GrResourceCache.cpp
+++ b/src/gpu/GrResourceCache.cpp
@@ -8,6 +8,8 @@
#include "GrResourceCache.h"
#include <atomic>
#include "GrCaps.h"
+#include "GrContext.h"
+#include "GrContextPriv.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrProxyProvider.h"
#include "GrSingleOwner.h"
@@ -109,28 +111,12 @@
GrResourceCache::GrResourceCache(const GrCaps* caps, GrSingleOwner* singleOwner,
uint32_t contextUniqueID)
- : fProxyProvider(nullptr)
- , fTimestamp(0)
- , fMaxCount(kDefaultMaxCount)
- , fMaxBytes(kDefaultMaxSize)
-#if GR_CACHE_STATS
- , fHighWaterCount(0)
- , fHighWaterBytes(0)
- , fBudgetedHighWaterCount(0)
- , fBudgetedHighWaterBytes(0)
-#endif
- , fBytes(0)
- , fBudgetedCount(0)
- , fBudgetedBytes(0)
- , fPurgeableBytes(0)
- , fInvalidUniqueKeyInbox(contextUniqueID)
+ : fInvalidUniqueKeyInbox(contextUniqueID)
, fFreedGpuResourceInbox(contextUniqueID)
, fContextUniqueID(contextUniqueID)
, fSingleOwner(singleOwner)
, fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
SkASSERT(contextUniqueID != SK_InvalidUniqueID);
- SkDEBUGCODE(fCount = 0;)
- SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr;)
}
GrResourceCache::~GrResourceCache() {
@@ -281,6 +267,17 @@
SkASSERT(!fResourcesAwaitingUnref.count());
}
+void GrResourceCache::refResource(GrGpuResource* resource) {
+ SkASSERT(resource);
+ SkASSERT(resource->getContext()->priv().getResourceCache() == this);
+ if (resource->cacheAccess().hasRef()) {
+ resource->ref();
+ } else {
+ this->refAndMakeResourceMRU(resource);
+ }
+ this->validate();
+}
+
class GrResourceCache::AvailableForScratchUse {
public:
AvailableForScratchUse(bool rejectPendingIO) : fRejectPendingIO(rejectPendingIO) { }
@@ -409,6 +406,10 @@
fPurgeableBytes -= resource->gpuMemorySize();
fPurgeableQueue.remove(resource);
this->addToNonpurgeableArray(resource);
+ } else if (!resource->cacheAccess().hasRef() &&
+ resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
+ SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
+ fNumBudgetedResourcesFlushWillMakePurgeable--;
}
resource->cacheAccess().ref();
@@ -437,6 +438,19 @@
#endif
resource->cacheAccess().setTimestamp(this->getNextTimestamp());
SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
+ if (!resource->resourcePriv().isPurgeable() &&
+ resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
+ SkASSERT(resource->resourcePriv().hasPendingIO_debugOnly());
+ ++fNumBudgetedResourcesFlushWillMakePurgeable;
+ }
+ } else {
+ // If this is budgeted and just became purgeable by dropping the last pending IO,
+ // then it clearly no longer needs a flush to become purgeable.
+ if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
+ resource->resourcePriv().isPurgeable()) {
+ SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
+ fNumBudgetedResourcesFlushWillMakePurgeable--;
+ }
}
if (!SkToBool(ResourceAccess::kAllCntsReachedZero_RefNotificationFlag & flags)) {
@@ -508,11 +522,17 @@
fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
#endif
+ if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
+ ++fNumBudgetedResourcesFlushWillMakePurgeable;
+ }
this->purgeAsNeeded();
} else {
SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
--fBudgetedCount;
fBudgetedBytes -= size;
+ if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
+ --fNumBudgetedResourcesFlushWillMakePurgeable;
+ }
}
SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
@@ -891,12 +911,19 @@
Stats stats(this);
size_t purgeableBytes = 0;
+ int numBudgetedResourcesFlushWillMakePurgeable = 0;
for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
+ if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
+ !fNonpurgeableResources[i]->cacheAccess().hasRef() &&
+ fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
+ SkASSERT(fNonpurgeableResources[i]->resourcePriv().hasPendingIO_debugOnly());
+ ++numBudgetedResourcesFlushWillMakePurgeable;
+ }
stats.update(fNonpurgeableResources[i]);
}
for (int i = 0; i < fPurgeableQueue.count(); ++i) {
@@ -911,6 +938,8 @@
SkASSERT(fBudgetedCount <= fCount);
SkASSERT(fBudgetedBytes <= fBytes);
SkASSERT(stats.fBytes == fBytes);
+ SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
+ numBudgetedResourcesFlushWillMakePurgeable);
SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
SkASSERT(stats.fBudgetedCount == fBudgetedCount);
SkASSERT(purgeableBytes == fPurgeableBytes);
diff --git a/src/gpu/GrResourceCache.h b/src/gpu/GrResourceCache.h
index 84378e6..ffaa6f3 100644
--- a/src/gpu/GrResourceCache.h
+++ b/src/gpu/GrResourceCache.h
@@ -265,9 +265,10 @@
void removeUniqueKey(GrGpuResource*);
void willRemoveScratchKey(const GrGpuResource*);
void didChangeBudgetStatus(GrGpuResource*);
- void refAndMakeResourceMRU(GrGpuResource*);
+ void refResource(GrGpuResource* resource);
/// @}
+ void refAndMakeResourceMRU(GrGpuResource*);
void processFreedGpuResources();
void addToNonpurgeableArray(GrGpuResource*);
void removeFromNonpurgeableArray(GrGpuResource*);
@@ -338,11 +339,11 @@
typedef SkTDPQueue<GrGpuResource*, CompareTimestamp, AccessResourceIndex> PurgeableQueue;
typedef SkTDArray<GrGpuResource*> ResourceArray;
- GrProxyProvider* fProxyProvider;
+ GrProxyProvider* fProxyProvider = nullptr;
// Whenever a resource is added to the cache or the result of a cache lookup, fTimestamp is
// assigned as the resource's timestamp and then incremented. fPurgeableQueue orders the
// purgeable resources by this value, and thus is used to purge resources in LRU order.
- uint32_t fTimestamp;
+ uint32_t fTimestamp = 0;
PurgeableQueue fPurgeableQueue;
ResourceArray fNonpurgeableResources;
@@ -352,37 +353,38 @@
UniqueHash fUniqueHash;
// our budget, used in purgeAsNeeded()
- int fMaxCount;
- size_t fMaxBytes;
+ int fMaxCount = kDefaultMaxCount;
+ size_t fMaxBytes = kDefaultMaxSize;
#if GR_CACHE_STATS
- int fHighWaterCount;
- size_t fHighWaterBytes;
- int fBudgetedHighWaterCount;
- size_t fBudgetedHighWaterBytes;
+ int fHighWaterCount = 0;
+ size_t fHighWaterBytes = 0;
+ int fBudgetedHighWaterCount = 0;
+ size_t fBudgetedHighWaterBytes = 0;
#endif
// our current stats for all resources
- SkDEBUGCODE(int fCount;)
- size_t fBytes;
+ SkDEBUGCODE(int fCount = 0;)
+ size_t fBytes = 0;
// our current stats for resources that count against the budget
- int fBudgetedCount;
- size_t fBudgetedBytes;
- size_t fPurgeableBytes;
+ int fBudgetedCount = 0;
+ size_t fBudgetedBytes = 0;
+ size_t fPurgeableBytes = 0;
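+ // Number of budgeted resources with no refs that are kept non-purgeable only by
+ // pending IO, i.e. resources that a flush will make purgeable.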
+ int fNumBudgetedResourcesFlushWillMakePurgeable = 0;
InvalidUniqueKeyInbox fInvalidUniqueKeyInbox;
FreedGpuResourceInbox fFreedGpuResourceInbox;
ReourcesAwaitingUnref fResourcesAwaitingUnref;
- uint32_t fContextUniqueID;
- GrSingleOwner* fSingleOwner;
+ uint32_t fContextUniqueID = SK_InvalidUniqueID;
+ GrSingleOwner* fSingleOwner = nullptr;
// This resource is allowed to be in the nonpurgeable array for the sake of validate() because
// we're in the midst of converting it to purgeable status.
- SkDEBUGCODE(GrGpuResource* fNewlyPurgeableResourceForValidation;)
+ SkDEBUGCODE(GrGpuResource* fNewlyPurgeableResourceForValidation = nullptr;)
- bool fPreferVRAMUseOverFlushes;
+ bool fPreferVRAMUseOverFlushes = false;
};
GR_MAKE_BITFIELD_CLASS_OPS(GrResourceCache::ScratchFlags);
@@ -404,6 +406,12 @@
void removeResource(GrGpuResource* resource) { fCache->removeResource(resource); }
/**
+ * Adds a ref to a resource with proper tracking if the resource has 0 refs prior to
+ * adding the ref.
+ */
+ void refResource(GrGpuResource* resource) { fCache->refResource(resource); }
+
+ /**
* Notifications that should be sent to the cache when the ref/io cnt status of resources
* changes.
*/
diff --git a/tests/DrawOpAtlasTest.cpp b/tests/DrawOpAtlasTest.cpp
index 91d57ec..dc6cd2a 100644
--- a/tests/DrawOpAtlasTest.cpp
+++ b/tests/DrawOpAtlasTest.cpp
@@ -184,6 +184,7 @@
auto gpu = context->priv().getGpu();
auto resourceProvider = context->priv().resourceProvider();
+ auto resourceCache = context->priv().getResourceCache();
auto drawingManager = context->priv().drawingManager();
auto textContext = drawingManager->getTextContext();
auto opMemoryPool = context->priv().opMemoryPool();
@@ -211,7 +212,8 @@
TestingUploadTarget uploadTarget;
- GrOpFlushState flushState(gpu, resourceProvider, uploadTarget.writeableTokenTracker());
+ GrOpFlushState flushState(gpu, resourceProvider, resourceCache,
+ uploadTarget.writeableTokenTracker());
GrOpFlushState::OpArgs opArgs = {
op.get(),
rtc->asRenderTargetProxy(),
diff --git a/tests/OpChainTest.cpp b/tests/OpChainTest.cpp
index 092ccf2..27c7dbe 100644
--- a/tests/OpChainTest.cpp
+++ b/tests/OpChainTest.cpp
@@ -203,7 +203,9 @@
init_combinable(g, &combinable, &random);
GrTokenTracker tracker;
GrOpFlushState flushState(context->priv().getGpu(),
- context->priv().resourceProvider(), &tracker);
+ context->priv().resourceProvider(),
+ context->priv().getResourceCache(),
+ &tracker);
GrRenderTargetOpList opList(context->priv().resourceProvider(),
sk_ref_sp(context->priv().opMemoryPool()),
proxy->asRenderTargetProxy(),
diff --git a/tests/ResourceAllocatorTest.cpp b/tests/ResourceAllocatorTest.cpp
index e968a51..2e107a1 100644
--- a/tests/ResourceAllocatorTest.cpp
+++ b/tests/ResourceAllocatorTest.cpp
@@ -90,8 +90,9 @@
// Basic test that two proxies with overlapping intervals and compatible descriptors are
// assigned different GrSurfaces.
static void overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider,
- GrSurfaceProxy* p1, GrSurfaceProxy* p2, bool expectedResult) {
- GrDeinstantiateProxyTracker deinstantiateTracker;
+ GrResourceCache* resourceCache, GrSurfaceProxy* p1, GrSurfaceProxy* p2,
+ bool expectedResult) {
+ GrDeinstantiateProxyTracker deinstantiateTracker(resourceCache);
GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
alloc.addInterval(p1, 0, 4);
@@ -114,9 +115,9 @@
// Test various cases when two proxies do not have overlapping intervals.
// This mainly acts as a test of the ResourceAllocator's free pool.
static void non_overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider,
- GrSurfaceProxy* p1, GrSurfaceProxy* p2,
+ GrResourceCache* resourceCache, GrSurfaceProxy* p1, GrSurfaceProxy* p2,
bool expectedResult) {
- GrDeinstantiateProxyTracker deinstantiateTracker;
+ GrDeinstantiateProxyTracker deinstantiateTracker(resourceCache);
GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
alloc.incOps();
@@ -151,6 +152,7 @@
const GrCaps* caps = ctxInfo.grContext()->priv().caps();
GrProxyProvider* proxyProvider = ctxInfo.grContext()->priv().proxyProvider();
GrResourceProvider* resourceProvider = ctxInfo.grContext()->priv().resourceProvider();
+ GrResourceCache* resourceCache = ctxInfo.grContext()->priv().getResourceCache();
bool orig = resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(true);
@@ -192,7 +194,7 @@
for (auto test : gOverlappingTests) {
GrSurfaceProxy* p1 = make_deferred(proxyProvider, caps, test.fP1);
GrSurfaceProxy* p2 = make_deferred(proxyProvider, caps, test.fP2);
- overlap_test(reporter, resourceProvider, p1, p2, test.fExpectation);
+ overlap_test(reporter, resourceProvider, resourceCache, p1, p2, test.fExpectation);
p1->completedRead();
p2->completedRead();
}
@@ -239,7 +241,7 @@
continue; // creation can fail (i.e., for msaa4 on iOS)
}
- non_overlap_test(reporter, resourceProvider, p1, p2, test.fExpectation);
+ non_overlap_test(reporter, resourceProvider, resourceCache, p1, p2, test.fExpectation);
p1->completedRead();
p2->completedRead();
@@ -255,7 +257,7 @@
GrSurfaceProxy* p1 = make_backend(ctxInfo.grContext(), t[0].fP1, &backEndTex);
GrSurfaceProxy* p2 = make_deferred(proxyProvider, caps, t[0].fP2);
- non_overlap_test(reporter, resourceProvider, p1, p2, t[0].fExpectation);
+ non_overlap_test(reporter, resourceProvider, resourceCache, p1, p2, t[0].fExpectation);
p1->completedRead();
p2->completedRead();
@@ -332,6 +334,7 @@
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(LazyDeinstantiation, reporter, ctxInfo) {
GrContext* context = ctxInfo.grContext();
GrResourceProvider* resourceProvider = ctxInfo.grContext()->priv().resourceProvider();
+ GrResourceCache* resourceCache = ctxInfo.grContext()->priv().getResourceCache();
for (auto explicitlyAllocating : {false, true}) {
resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(explicitlyAllocating);
ProxyParams texParams;
@@ -352,7 +355,7 @@
auto p2 = make_lazy(proxyProvider, caps, rtParams, true);
auto p3 = make_lazy(proxyProvider, caps, rtParams, false);
- GrDeinstantiateProxyTracker deinstantiateTracker;
+ GrDeinstantiateProxyTracker deinstantiateTracker(resourceCache);
{
GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
alloc.addInterval(p0.get(), 0, 1);
@@ -381,6 +384,7 @@
const GrCaps* caps = context->priv().caps();
GrProxyProvider* proxyProvider = context->priv().proxyProvider();
GrResourceProvider* resourceProvider = context->priv().resourceProvider();
+ GrResourceCache* resourceCache = context->priv().getResourceCache();
bool orig = resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(true);
@@ -401,7 +405,7 @@
GrSurfaceProxy* p3 = make_deferred(proxyProvider, caps, params);
GrSurfaceProxy* p4 = make_deferred(proxyProvider, caps, params);
- GrDeinstantiateProxyTracker deinstantiateTracker;
+ GrDeinstantiateProxyTracker deinstantiateTracker(resourceCache);
GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 2));
alloc.addInterval(p1, 0, 0);