Fix bug in GrResourceAllocator's intermediate flushing (take 2)
Without an increment of fCurOpListIndex in the intermediate flush case (it was missing), the drawing manager could end up re-executing an entire opList.
TBR=bsalomon@google.com
Bug: 942538
Change-Id: I8298c02071e87a343ec03d809959d06049071d93
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/203726
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 10da8f9..ae36031 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -285,7 +285,8 @@
bool flushed = false;
{
- GrResourceAllocator alloc(resourceProvider, flushState.deinstantiateProxyTracker());
+ GrResourceAllocator alloc(resourceProvider, flushState.deinstantiateProxyTracker()
+ SkDEBUGCODE(, fDAG.numOpLists()));
for (int i = 0; i < fDAG.numOpLists(); ++i) {
if (fDAG.opList(i)) {
fDAG.opList(i)->gatherProxyIntervals(&alloc);
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index 0d3c34e..f824023 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -47,6 +47,7 @@
}
fEndOfOpListOpIndices.push_back(this->curOp()); // This is the first op index of the next opList
+ SkASSERT(fEndOfOpListOpIndices.count() <= fNumOpLists);
}
GrResourceAllocator::~GrResourceAllocator() {
@@ -309,6 +310,34 @@
}
}
+bool GrResourceAllocator::onOpListBoundary() const {
+ if (fIntvlList.empty()) {
+ SkASSERT(fCurOpListIndex+1 <= fNumOpLists);
+ // Although technically on an opList boundary there is no need to force an
+ // intermediate flush here
+ return false;
+ }
+
+ const Interval* tmp = fIntvlList.peekHead();
+ return fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start();
+}
+
+void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
+ *stopIndex = fCurOpListIndex+1;
+
+ // This is interrupting the allocation of resources for this flush. We need to
+ // proactively clear the active interval list of any intervals that aren't
+ // guaranteed to survive the partial flush lest they become zombies (i.e.,
+ // holding a deleted surface proxy).
+ const Interval* tmp = fIntvlList.peekHead();
+ SkASSERT(fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start());
+
+ fCurOpListIndex++;
+ SkASSERT(fCurOpListIndex < fNumOpLists);
+
+ this->expire(tmp->start());
+}
+
bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
SkASSERT(outError);
*outError = AssignError::kNoError;
@@ -318,6 +347,9 @@
return false; // nothing to render
}
+ SkASSERT(fCurOpListIndex < fNumOpLists);
+ SkASSERT(fNumOpLists == fEndOfOpListOpIndices.count());
+
*startIndex = fCurOpListIndex;
*stopIndex = fEndOfOpListOpIndices.count();
@@ -332,8 +364,9 @@
this->dumpIntervals();
#endif
while (Interval* cur = fIntvlList.popHead()) {
- if (fEndOfOpListOpIndices[fCurOpListIndex] < cur->start()) {
+ if (fEndOfOpListOpIndices[fCurOpListIndex] <= cur->start()) {
fCurOpListIndex++;
+ SkASSERT(fCurOpListIndex < fNumOpLists);
}
this->expire(cur->start());
@@ -352,19 +385,8 @@
if (fResourceProvider->overBudget()) {
// Only force intermediate draws on opList boundaries
- if (!fIntvlList.empty() &&
- fEndOfOpListOpIndices[fCurOpListIndex] < fIntvlList.peekHead()->start()) {
- *stopIndex = fCurOpListIndex+1;
-
- // This is interrupting the allocation of resources for this flush. We need to
- // proactively clear the active interval list of any intervals that aren't
- // guaranteed to survive the partial flush lest they become zombies (i.e.,
- // holding a deleted surface proxy).
- if (const Interval* tmp = fIntvlList.peekHead()) {
- this->expire(tmp->start());
- } else {
- this->expire(std::numeric_limits<unsigned int>::max());
- }
+ if (this->onOpListBoundary()) {
+ this->forceIntermediateFlush(stopIndex);
return true;
}
}
@@ -409,19 +431,8 @@
if (fResourceProvider->overBudget()) {
// Only force intermediate draws on opList boundaries
- if (!fIntvlList.empty() &&
- fEndOfOpListOpIndices[fCurOpListIndex] < fIntvlList.peekHead()->start()) {
- *stopIndex = fCurOpListIndex+1;
-
- // This is interrupting the allocation of resources for this flush. We need to
- // proactively clear the active interval list of any intervals that aren't
- // guaranteed to survive the partial flush lest they become zombies (i.e.,
- // holding a deleted surface proxy).
- if (const Interval* tmp = fIntvlList.peekHead()) {
- this->expire(tmp->start());
- } else {
- this->expire(std::numeric_limits<unsigned int>::max());
- }
+ if (this->onOpListBoundary()) {
+ this->forceIntermediateFlush(stopIndex);
return true;
}
}
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
index ea1250f..9453d36 100644
--- a/src/gpu/GrResourceAllocator.h
+++ b/src/gpu/GrResourceAllocator.h
@@ -42,8 +42,13 @@
*/
class GrResourceAllocator {
public:
- GrResourceAllocator(GrResourceProvider* resourceProvider, GrDeinstantiateProxyTracker* tracker)
- : fResourceProvider(resourceProvider), fDeinstantiateTracker(tracker) {}
+ GrResourceAllocator(GrResourceProvider* resourceProvider,
+ GrDeinstantiateProxyTracker* tracker
+ SkDEBUGCODE(, int numOpLists))
+ : fResourceProvider(resourceProvider)
+ , fDeinstantiateTracker(tracker)
+ SkDEBUGCODE(, fNumOpLists(numOpLists)) {
+ }
~GrResourceAllocator();
@@ -88,6 +93,9 @@
// Remove dead intervals from the active list
void expire(unsigned int curIndex);
+ bool onOpListBoundary() const;
+ void forceIntermediateFlush(int* stopIndex);
+
// These two methods wrap the interactions with the free pool
void recycleSurface(sk_sp<GrSurface> surface);
sk_sp<GrSurface> findSurfaceFor(const GrSurfaceProxy* proxy, bool needsStencil);
@@ -218,11 +226,11 @@
IntervalList fIntvlList; // All the intervals sorted by increasing start
IntervalList fActiveIntvls; // List of live intervals during assignment
- // (sorted by increasing end)
- unsigned int fNumOps = 1; // op # 0 is reserved for uploads at the start
- // of a flush
+ // (sorted by increasing end)
+ unsigned int fNumOps = 0;
SkTArray<unsigned int> fEndOfOpListOpIndices;
int fCurOpListIndex = 0;
+ SkDEBUGCODE(const int fNumOpLists = -1;)
SkDEBUGCODE(bool fAssigned = false;)
diff --git a/tests/ResourceAllocatorTest.cpp b/tests/ResourceAllocatorTest.cpp
index a71221d..0a1a19a 100644
--- a/tests/ResourceAllocatorTest.cpp
+++ b/tests/ResourceAllocatorTest.cpp
@@ -28,6 +28,7 @@
SkBackingFit fFit;
int fSampleCnt;
GrSurfaceOrigin fOrigin;
+ SkBudgeted fBudgeted;
// TODO: do we care about mipmapping
};
@@ -45,7 +46,7 @@
const GrBackendFormat format = caps->getBackendFormatFromColorType(p.fColorType);
- auto tmp = proxyProvider->createProxy(format, desc, p.fOrigin, p.fFit, SkBudgeted::kNo);
+ auto tmp = proxyProvider->createProxy(format, desc, p.fOrigin, p.fFit, p.fBudgeted);
if (!tmp) {
return nullptr;
}
@@ -91,10 +92,12 @@
static void overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider,
GrSurfaceProxy* p1, GrSurfaceProxy* p2, bool expectedResult) {
GrDeinstantiateProxyTracker deinstantiateTracker;
- GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker);
+ GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
alloc.addInterval(p1, 0, 4);
+ alloc.incOps();
alloc.addInterval(p2, 1, 2);
+ alloc.incOps();
alloc.markEndOfOpList(0);
int startIndex, stopIndex;
@@ -114,7 +117,14 @@
GrSurfaceProxy* p1, GrSurfaceProxy* p2,
bool expectedResult) {
GrDeinstantiateProxyTracker deinstantiateTracker;
- GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker);
+ GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
+
+ alloc.incOps();
+ alloc.incOps();
+ alloc.incOps();
+ alloc.incOps();
+ alloc.incOps();
+ alloc.incOps();
alloc.addInterval(p1, 0, 2);
alloc.addInterval(p2, 3, 5);
@@ -167,14 +177,16 @@
const GrSurfaceOrigin kTL = kTopLeft_GrSurfaceOrigin;
const GrSurfaceOrigin kBL = kBottomLeft_GrSurfaceOrigin;
+ const SkBudgeted kNotB = SkBudgeted::kNo;
+
//--------------------------------------------------------------------------------------------
TestCase gOverlappingTests[] = {
//----------------------------------------------------------------------------------------
// Two proxies with overlapping intervals and compatible descriptors should never share
// RT version
- { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kDontShare },
+ { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, kDontShare },
// non-RT version
- { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kDontShare },
+ { { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, kDontShare },
};
for (auto test : gOverlappingTests) {
@@ -195,28 +207,28 @@
//----------------------------------------------------------------------------------------
// Two non-overlapping intervals w/ compatible proxies should share
// both same size & approx
- { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kShare },
- { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kConditionallyShare },
+ { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, kShare },
+ { { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, kConditionallyShare },
// diffs sizes but still approx
- { { 64, kRT, kRGBA, kA, 0, kTL }, { 50, kRT, kRGBA, kA, 0, kTL }, kShare },
- { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 50, kNotRT, kRGBA, kA, 0, kTL }, kConditionallyShare },
+ { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 50, kRT, kRGBA, kA, 0, kTL, kNotB }, kShare },
+ { { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, { 50, kNotRT, kRGBA, kA, 0, kTL, kNotB }, kConditionallyShare },
// sames sizes but exact
- { { 64, kRT, kRGBA, kE, 0, kTL }, { 64, kRT, kRGBA, kE, 0, kTL }, kShare },
- { { 64, kNotRT, kRGBA, kE, 0, kTL }, { 64, kNotRT, kRGBA, kE, 0, kTL }, kConditionallyShare },
+ { { 64, kRT, kRGBA, kE, 0, kTL, kNotB }, { 64, kRT, kRGBA, kE, 0, kTL, kNotB }, kShare },
+ { { 64, kNotRT, kRGBA, kE, 0, kTL, kNotB }, { 64, kNotRT, kRGBA, kE, 0, kTL, kNotB }, kConditionallyShare },
//----------------------------------------------------------------------------------------
// Two non-overlapping intervals w/ different exact sizes should not share
- { { 56, kRT, kRGBA, kE, 0, kTL }, { 54, kRT, kRGBA, kE, 0, kTL }, kDontShare },
+ { { 56, kRT, kRGBA, kE, 0, kTL, kNotB }, { 54, kRT, kRGBA, kE, 0, kTL, kNotB }, kDontShare },
// Two non-overlapping intervals w/ _very different_ approx sizes should not share
- { { 255, kRT, kRGBA, kA, 0, kTL }, { 127, kRT, kRGBA, kA, 0, kTL }, kDontShare },
+ { { 255, kRT, kRGBA, kA, 0, kTL, kNotB }, { 127, kRT, kRGBA, kA, 0, kTL, kNotB }, kDontShare },
// Two non-overlapping intervals w/ different MSAA sample counts should not share
- { { 64, kRT, kRGBA, kA, k2, kTL },{ 64, kRT, kRGBA, kA, k4, kTL}, k2 == k4 },
+ { { 64, kRT, kRGBA, kA, k2, kTL, kNotB },{ 64, kRT, kRGBA, kA, k4,kTL, kNotB}, k2 == k4 },
// Two non-overlapping intervals w/ different configs should not share
- { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kBGRA, kA, 0, kTL }, kDontShare },
+ { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kRT, kBGRA, kA, 0, kTL, kNotB }, kDontShare },
// Two non-overlapping intervals w/ different RT classifications should never share
- { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kDontShare },
- { { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kDontShare },
+ { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, kDontShare },
+ { { 64, kNotRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, kDontShare },
// Two non-overlapping intervals w/ different origins should share
- { { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kBL }, kShare },
+ { { 64, kRT, kRGBA, kA, 0, kTL, kNotB }, { 64, kRT, kRGBA, kA, 0, kBL, kNotB }, kShare },
};
for (auto test : gNonOverlappingTests) {
@@ -236,7 +248,7 @@
{
// Wrapped backend textures should never be reused
TestCase t[1] = {
- { { 64, kNotRT, kRGBA, kE, 0, kTL }, { 64, kNotRT, kRGBA, kE, 0, kTL }, kDontShare }
+ { { 64, kNotRT, kRGBA, kE, 0, kTL, kNotB }, { 64, kNotRT, kRGBA, kE, 0, kTL, kNotB }, kDontShare }
};
GrBackendTexture backEndTex;
@@ -315,7 +327,7 @@
: GrSurfaceProxy::LazyInstantiationType ::kSingleUse;
GrInternalSurfaceFlags flags = GrInternalSurfaceFlags::kNone;
return proxyProvider->createLazyProxy(callback, format, desc, p.fOrigin, GrMipMapped::kNo,
- flags, p.fFit, SkBudgeted::kNo, lazyType);
+ flags, p.fFit, p.fBudgeted, lazyType);
}
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(LazyDeinstantiation, reporter, ctxInfo) {
@@ -330,6 +342,7 @@
texParams.fIsRT = false;
texParams.fSampleCnt = 1;
texParams.fSize = 100;
+ texParams.fBudgeted = SkBudgeted::kNo;
ProxyParams rtParams = texParams;
rtParams.fIsRT = true;
auto proxyProvider = context->priv().proxyProvider();
@@ -342,11 +355,12 @@
GrDeinstantiateProxyTracker deinstantiateTracker;
{
- GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker);
+ GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 1));
alloc.addInterval(p0.get(), 0, 1);
alloc.addInterval(p1.get(), 0, 1);
alloc.addInterval(p2.get(), 0, 1);
alloc.addInterval(p3.get(), 0, 1);
+ alloc.incOps();
alloc.markEndOfOpList(0);
int startIndex, stopIndex;
GrResourceAllocator::AssignError error;
@@ -359,3 +373,67 @@
REPORTER_ASSERT(reporter, p3->isInstantiated());
}
}
+
+// Set up so there are two opLists that need to be flushed but the resource allocator thinks
+// it is over budget. The two opLists should be flushed separately and the opList indices
+// returned from assign should be correct.
+DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorOverBudgetTest, reporter, ctxInfo) {
+ GrContext* context = ctxInfo.grContext();
+ const GrCaps* caps = context->priv().caps();
+ GrProxyProvider* proxyProvider = context->priv().proxyProvider();
+ GrResourceProvider* resourceProvider = context->priv().resourceProvider();
+
+ bool orig = resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(true);
+
+ int origMaxNum;
+ size_t origMaxBytes;
+ context->getResourceCacheLimits(&origMaxNum, &origMaxBytes);
+
+ // Force the resource allocator to always believe it is over budget
+ context->setResourceCacheLimits(0, 0);
+
+ const ProxyParams params = { 64, false, kRGBA_8888_SkColorType,
+ SkBackingFit::kExact, 0, kTopLeft_GrSurfaceOrigin,
+ SkBudgeted::kYes };
+
+ {
+ GrSurfaceProxy* p1 = make_deferred(proxyProvider, caps, params);
+ GrSurfaceProxy* p2 = make_deferred(proxyProvider, caps, params);
+ GrSurfaceProxy* p3 = make_deferred(proxyProvider, caps, params);
+ GrSurfaceProxy* p4 = make_deferred(proxyProvider, caps, params);
+
+ GrDeinstantiateProxyTracker deinstantiateTracker;
+ GrResourceAllocator alloc(resourceProvider, &deinstantiateTracker SkDEBUGCODE(, 2));
+
+ alloc.addInterval(p1, 0, 0);
+ alloc.incOps();
+ alloc.addInterval(p2, 1, 1);
+ alloc.incOps();
+ alloc.markEndOfOpList(0);
+
+ alloc.addInterval(p3, 2, 2);
+ alloc.incOps();
+ alloc.addInterval(p4, 3, 3);
+ alloc.incOps();
+ alloc.markEndOfOpList(1);
+
+ int startIndex, stopIndex;
+ GrResourceAllocator::AssignError error;
+
+ alloc.assign(&startIndex, &stopIndex, &error);
+ REPORTER_ASSERT(reporter, GrResourceAllocator::AssignError::kNoError == error);
+ REPORTER_ASSERT(reporter, 0 == startIndex && 1 == stopIndex);
+
+ alloc.assign(&startIndex, &stopIndex, &error);
+ REPORTER_ASSERT(reporter, GrResourceAllocator::AssignError::kNoError == error);
+ REPORTER_ASSERT(reporter, 1 == startIndex && 2 == stopIndex);
+
+ p1->completedRead();
+ p2->completedRead();
+ p3->completedRead();
+ p4->completedRead();
+ }
+
+ context->setResourceCacheLimits(origMaxNum, origMaxBytes);
+ resourceProvider->testingOnly_setExplicitlyAllocateGPUResources(orig);
+}