Add overbudget handling to GrResourceAllocator

Change-Id: I5536c908310e907c77b5d55441a0edac6a74bf0e
Reviewed-on: https://skia-review.googlesource.com/71182
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index cce5471..e42b5f4 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -102,7 +102,6 @@
         return GrSemaphoresSubmitted::kNo;
     }
     fFlushing = true;
-    bool flushed = false;
 
     for (int i = 0; i < fOpLists.count(); ++i) {
         // Semi-usually the GrOpLists are already closed at this point, but sometimes Ganesh
@@ -136,9 +135,10 @@
 #endif
 
     GrOnFlushResourceProvider onFlushProvider(this);
+    // TODO: AFAICT the only reason fFlushState is on GrDrawingManager rather than on the
+    // stack here is to preserve the flush tokens.
 
     // Prepare any onFlush op lists (e.g. atlases).
-    SkSTArray<8, sk_sp<GrOpList>> onFlushOpLists;
     if (!fOnFlushCBObjects.empty()) {
         // MDB TODO: pre-MDB '1' is the correct pre-allocated size. Post-MDB it will need
         // to be larger.
@@ -158,7 +158,7 @@
                 }
                 onFlushOpList->makeClosed(*fContext->caps());
                 onFlushOpList->prepare(&fFlushState);
-                onFlushOpLists.push_back(std::move(onFlushOpList));
+                fOnFlushOpLists.push_back(std::move(onFlushOpList));
             }
             renderTargetContexts.reset();
         }
@@ -171,71 +171,29 @@
     }
 #endif
 
+    int startIndex, stopIndex;
+    bool flushed = false;
+
     {
         GrResourceAllocator alloc(fContext->resourceProvider());
         for (int i = 0; i < fOpLists.count(); ++i) {
             fOpLists[i]->gatherProxyIntervals(&alloc);
+            alloc.markEndOfOpList(i);
         }
 
-#ifndef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
-        alloc.assign();
+#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
+        startIndex = 0;
+        stopIndex = fOpLists.count();
+#else
+        while (alloc.assign(&startIndex, &stopIndex))
 #endif
-    }
-
-    for (int i = 0; i < fOpLists.count(); ++i) {
-        if (!fOpLists[i]->instantiate(fContext->resourceProvider())) {
-            SkDebugf("OpList failed to instantiate.\n");
-            fOpLists[i] = nullptr;
-            continue;
-        }
-
-        // Instantiate all deferred proxies (being built on worker threads) so we can upload them
-        fOpLists[i]->instantiateDeferredProxies(fContext->resourceProvider());
-        fOpLists[i]->prepare(&fFlushState);
-    }
-
-    // Upload all data to the GPU
-    fFlushState.preExecuteDraws();
-
-    // Execute the onFlush op lists first, if any.
-    for (sk_sp<GrOpList>& onFlushOpList : onFlushOpLists) {
-        if (!onFlushOpList->execute(&fFlushState)) {
-            SkDebugf("WARNING: onFlushOpList failed to execute.\n");
-        }
-        SkASSERT(onFlushOpList->unique());
-        onFlushOpList = nullptr;
-    }
-    onFlushOpLists.reset();
-
-    // Execute the normal op lists.
-    for (int i = 0; i < fOpLists.count(); ++i) {
-        if (!fOpLists[i]) {
-            continue;
-        }
-
-        if (fOpLists[i]->execute(&fFlushState)) {
-            flushed = true;
+        {
+            if (this->executeOpLists(startIndex, stopIndex, &fFlushState)) {
+                flushed = true;
+            }
         }
     }
 
-    SkASSERT(fFlushState.nextDrawToken() == fFlushState.nextTokenToFlush());
-
-    // We reset the flush state before the OpLists so that the last resources to be freed are those
-    // that are written to in the OpLists. This helps to make sure the most recently used resources
-    // are the last to be purged by the resource cache.
-    fFlushState.reset();
-
-    for (int i = 0; i < fOpLists.count(); ++i) {
-        if (!fOpLists[i]) {
-            continue;
-        }
-        if (!fOpLists[i]->unique()) {
-            // TODO: Eventually this should be guaranteed unique.
-            // https://bugs.chromium.org/p/skia/issues/detail?id=7111
-            fOpLists[i]->endFlush();
-        }
-        fOpLists[i] = nullptr;
-    }
     fOpLists.reset();
 
     GrSemaphoresSubmitted result = fContext->getGpu()->finishFlush(numSemaphores,
@@ -253,6 +211,79 @@
     return result;
 }
 
+bool GrDrawingManager::executeOpLists(int startIndex, int stopIndex, GrOpFlushState* flushState) {
+    SkASSERT(startIndex <= stopIndex && stopIndex <= fOpLists.count());
+
+    bool anyOpListsExecuted = false;
+
+    for (int i = startIndex; i < stopIndex; ++i) {
+        if (!fOpLists[i]) {
+             continue;
+        }
+
+#ifdef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
+        if (!fOpLists[i]->instantiate(fContext->resourceProvider())) {
+            SkDebugf("OpList failed to instantiate.\n");
+            fOpLists[i] = nullptr;
+            continue;
+        }
+#else
+        SkASSERT(fOpLists[i]->isInstantiated());
+#endif
+
+        // TODO: handle this instantiation via lazy surface proxies?
+        // Instantiate all deferred proxies (being built on worker threads) so we can upload them
+        fOpLists[i]->instantiateDeferredProxies(fContext->resourceProvider());
+        fOpLists[i]->prepare(flushState);
+    }
+
+    // Upload all data to the GPU
+    flushState->preExecuteDraws();
+
+    // Execute the onFlush op lists first, if any.
+    for (sk_sp<GrOpList>& onFlushOpList : fOnFlushOpLists) {
+        if (!onFlushOpList->execute(flushState)) {
+            SkDebugf("WARNING: onFlushOpList failed to execute.\n");
+        }
+        SkASSERT(onFlushOpList->unique());
+        onFlushOpList = nullptr;
+    }
+    fOnFlushOpLists.reset();
+
+    // Execute the normal op lists.
+    for (int i = startIndex; i < stopIndex; ++i) {
+        if (!fOpLists[i]) {
+            continue;
+        }
+
+        if (fOpLists[i]->execute(flushState)) {
+            anyOpListsExecuted = true;
+        }
+    }
+
+    SkASSERT(!flushState->commandBuffer());
+    SkASSERT(flushState->nextDrawToken() == flushState->nextTokenToFlush());
+
+    // We reset the flush state before the OpLists so that the last resources to be freed are those
+    // that are written to in the OpLists. This helps to make sure the most recently used resources
+    // are the last to be purged by the resource cache.
+    flushState->reset();
+
+    for (int i = startIndex; i < stopIndex; ++i) {
+        if (!fOpLists[i]) {
+            continue;
+        }
+        if (!fOpLists[i]->unique()) {
+            // TODO: Eventually this should be guaranteed unique.
+            // https://bugs.chromium.org/p/skia/issues/detail?id=7111
+            fOpLists[i]->endFlush();
+        }
+        fOpLists[i] = nullptr;
+    }
+
+    return anyOpListsExecuted;
+}
+
 GrSemaphoresSubmitted GrDrawingManager::prepareSurfaceForExternalIO(
         GrSurfaceProxy* proxy, int numSemaphores, GrBackendSemaphore backendSemaphores[]) {
     if (this->wasAbandoned()) {
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index d4ec85a..cc70d93 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -100,6 +100,10 @@
 
     void abandon();
     void cleanup();
+
+    // Returns true if any opLists were actually executed; false otherwise.
+    bool executeOpLists(int startIndex, int stopIndex, GrOpFlushState*);
+
     GrSemaphoresSubmitted flush(GrSurfaceProxy* proxy,
                                 int numSemaphores = 0,
                                 GrBackendSemaphore backendSemaphores[] = nullptr) {
@@ -127,6 +131,7 @@
 
     bool                              fAbandoned;
     SkTArray<sk_sp<GrOpList>>         fOpLists;
+    SkSTArray<8, sk_sp<GrOpList>>     fOnFlushOpLists;
 
     std::unique_ptr<GrAtlasTextContext> fAtlasTextContext;
 
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index 3e34a71..5cb882f 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -9,6 +9,8 @@
 
 #include "GrGpuResourcePriv.h"
 #include "GrOpList.h"
+#include "GrRenderTargetProxy.h"
+#include "GrResourceCache.h"
 #include "GrResourceProvider.h"
 #include "GrSurfacePriv.h"
 #include "GrSurfaceProxy.h"
@@ -21,6 +23,18 @@
     fProxy->priv().assign(std::move(s));
 }
 
+
+void GrResourceAllocator::markEndOfOpList(int opListIndex) {
+    SkASSERT(!fAssigned);      // We shouldn't be adding any opLists after (or during) assignment
+
+    SkASSERT(fEndOfOpListOpIndices.count() == opListIndex);
+    if (!fEndOfOpListOpIndices.empty()) {
+        SkASSERT(fEndOfOpListOpIndices.back() < this->curOp());
+    }
+
+    fEndOfOpListOpIndices.push_back(this->curOp()); // This is the first op index of the next opList
+}
+
 GrResourceAllocator::~GrResourceAllocator() {
 #ifndef SK_DISABLE_EXPLICIT_GPU_RESOURCE_ALLOCATION
     SkASSERT(fIntvlList.empty());
@@ -36,11 +50,8 @@
 
     if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
         // Revise the interval for an existing use
-        // TODO: this assert is failing on the copy_on_write_retain GM!
-        SkASSERT(intvl->end() <= start);
-        if (intvl->end() < end) {
-            intvl->extendEnd(end);
-        }
+        SkASSERT(intvl->end() <= start && intvl->end() <= end);
+        intvl->extendEnd(end);
         return;
     }
 
@@ -122,8 +133,8 @@
 
 // First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
 // If we can't find a useable one, create a new one.
-// TODO: handle being overbudget
-sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(const GrSurfaceProxy* proxy) {
+sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(const GrSurfaceProxy* proxy,
+                                                     bool needsStencil) {
     // First look in the free pool
     GrScratchKey key;
 
@@ -141,6 +152,7 @@
             surface->resourcePriv().makeBudgeted();
         }
 
+        GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider, surface.get(), needsStencil);
         return surface;
     }
 
@@ -164,20 +176,48 @@
     }
 }
 
-void GrResourceAllocator::assign() {
-    fIntvlHash.reset(); // we don't need this anymore
+bool GrResourceAllocator::assign(int* startIndex, int* stopIndex) {
+    fIntvlHash.reset(); // we don't need the interval hash anymore
+    if (fIntvlList.empty()) {
+        return false;          // nothing to render
+    }
+
+    *startIndex = fCurOpListIndex;
+    *stopIndex = fEndOfOpListOpIndices.count();
+
     SkDEBUGCODE(fAssigned = true;)
 
     while (Interval* cur = fIntvlList.popHead()) {
+        if (fEndOfOpListOpIndices[fCurOpListIndex] < cur->start()) {
+            fCurOpListIndex++;
+        }
+
         this->expire(cur->start());
 
+        bool needsStencil = cur->proxy()->asRenderTargetProxy()
+                                            ? cur->proxy()->asRenderTargetProxy()->needsStencil()
+                                            : false;
+
         if (cur->proxy()->priv().isInstantiated()) {
+            GrSurfaceProxyPriv::AttachStencilIfNeeded(fResourceProvider,
+                                                      cur->proxy()->priv().peekSurface(),
+                                                      needsStencil);
+
             fActiveIntvls.insertByIncreasingEnd(cur);
+
+            if (fResourceProvider->overBudget()) {
+                // Only force intermediate draws on opList boundaries
+                if (!fIntvlList.empty() &&
+                    fEndOfOpListOpIndices[fCurOpListIndex] < fIntvlList.peekHead()->start()) {
+                    *stopIndex = fCurOpListIndex+1;
+                    return true;
+                }
+            }
+
             continue;
         }
 
-        // TODO: add over budget handling here?
-        sk_sp<GrSurface> surface = this->findSurfaceFor(cur->proxy());
+        sk_sp<GrSurface> surface = this->findSurfaceFor(cur->proxy(), needsStencil);
         if (surface) {
             // TODO: make getUniqueKey virtual on GrSurfaceProxy
             GrTextureProxy* tex = cur->proxy()->asTextureProxy();
@@ -188,11 +228,21 @@
 
             cur->assign(std::move(surface));
         }
-        // TODO: handle resouce allocation failure upstack
+
+        // TODO: handle resource allocation failure upstack
         fActiveIntvls.insertByIncreasingEnd(cur);
+
+        if (fResourceProvider->overBudget()) {
+            // Only force intermediate draws on opList boundaries
+            if (!fIntvlList.empty() &&
+                fEndOfOpListOpIndices[fCurOpListIndex] < fIntvlList.peekHead()->start()) {
+                *stopIndex = fCurOpListIndex+1;
+                return true;
+            }
+        }
     }
 
     // expire all the remaining intervals to drain the active interval list
     this->expire(std::numeric_limits<unsigned int>::max());
+    return true;
 }
-
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
index 2f0b00e..f576649 100644
--- a/src/gpu/GrResourceAllocator.h
+++ b/src/gpu/GrResourceAllocator.h
@@ -45,7 +45,7 @@
     void incOps() { fNumOps++; }
     unsigned int numOps() const { return fNumOps; }
 
-    // Add a usage interval from start to end inclusive. This is usually used for renderTargets.
+    // Add a usage interval from 'start' to 'end' inclusive. This is usually used for renderTargets.
     // If an existing interval already exists it will be expanded to include the new range.
     void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end);
 
@@ -55,7 +55,13 @@
         this->addInterval(proxy, fNumOps, fNumOps);
     }
 
-    void assign();
+    // Returns true when the opLists from 'startIndex' to 'stopIndex' should be executed;
+    // false when nothing remains to be executed.
+    // This is used to execute a portion of the queued opLists in order to reduce the total
+    // amount of GPU resources required.
+    bool assign(int* startIndex, int* stopIndex);
+
+    void markEndOfOpList(int opListIndex);
 
 private:
     class Interval;
@@ -65,7 +71,7 @@
 
     // These two methods wrap the interactions with the free pool
     void freeUpSurface(sk_sp<GrSurface> surface);
-    sk_sp<GrSurface> findSurfaceFor(const GrSurfaceProxy* proxy);
+    sk_sp<GrSurface> findSurfaceFor(const GrSurfaceProxy* proxy, bool needsStencil);
 
     struct FreePoolTraits {
         static const GrScratchKey& GetKey(const GrSurface& s) {
@@ -128,8 +134,6 @@
         }
         static uint32_t Hash(const uint32_t& key) { return key; }
 
-
-
     private:
         sk_sp<GrSurface> fAssignedSurface;
         GrSurfaceProxy*  fProxy;
@@ -160,19 +164,22 @@
     // Gathered statistics indicate that 99% of flushes will be covered by <= 12 Intervals
     static const int kInitialArenaSize = 12 * sizeof(Interval);
 
-    GrResourceProvider* fResourceProvider;
-    FreePoolMultiMap    fFreePool;          // Recently created/used GrSurfaces
-    IntvlHash           fIntvlHash;         // All the intervals, hashed by proxyID
+    GrResourceProvider*    fResourceProvider;
+    FreePoolMultiMap       fFreePool;          // Recently created/used GrSurfaces
+    IntvlHash              fIntvlHash;         // All the intervals, hashed by proxyID
 
-    IntervalList        fIntvlList;         // All the intervals sorted by increasing start
-    IntervalList        fActiveIntvls;      // List of live intervals during assignment
-                                            // (sorted by increasing end)
-    unsigned int        fNumOps = 0;
-    SkDEBUGCODE(bool    fAssigned = false;)
+    IntervalList           fIntvlList;         // All the intervals sorted by increasing start
+    IntervalList           fActiveIntvls;      // List of live intervals during assignment
+                                               // (sorted by increasing end)
+    unsigned int           fNumOps = 0;
+    SkTArray<unsigned int> fEndOfOpListOpIndices;
+    int                    fCurOpListIndex = 0;
 
-    char                fStorage[kInitialArenaSize];
-    SkArenaAlloc        fIntervalAllocator { fStorage, kInitialArenaSize, 0 };
-    Interval*           fFreeIntervalList = nullptr;
+    SkDEBUGCODE(bool       fAssigned = false;)
+
+    char                   fStorage[kInitialArenaSize];
+    SkArenaAlloc           fIntervalAllocator { fStorage, kInitialArenaSize, 0 };
+    Interval*              fFreeIntervalList = nullptr;
 };
 
 #endif // GrResourceAllocator_DEFINED
diff --git a/src/gpu/GrResourceCache.h b/src/gpu/GrResourceCache.h
index 9424532..54be56d 100644
--- a/src/gpu/GrResourceCache.h
+++ b/src/gpu/GrResourceCache.h
@@ -246,6 +246,8 @@
     /** Purge all resources not used since the passed in time. */
     void purgeResourcesNotUsedSince(GrStdSteadyClock::time_point);
 
+    bool overBudget() const { return fBudgetedBytes > fMaxBytes || fBudgetedCount > fMaxCount; }
+
     /**
      * Purge unlocked resources from the cache until the the provided byte count has been reached
      * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
@@ -343,7 +345,6 @@
     void processFreedGpuResources();
     void addToNonpurgeableArray(GrGpuResource*);
     void removeFromNonpurgeableArray(GrGpuResource*);
-    bool overBudget() const { return fBudgetedBytes > fMaxBytes || fBudgetedCount > fMaxCount; }
 
     bool wouldFit(size_t bytes) {
         return fBudgetedBytes+bytes <= fMaxBytes && fBudgetedCount+1 <= fMaxCount;
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index c7b2fe3..4cd34da 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -10,6 +10,7 @@
 
 #include "GrBuffer.h"
 #include "GrPathRange.h"
+#include "GrResourceCache.h"
 #include "SkImageInfo.h"
 #include "SkScalerContext.h"
 
@@ -259,6 +260,7 @@
     static bool IsFunctionallyExact(GrSurfaceProxy* proxy);
 
     const GrCaps* caps() const { return fCaps.get(); }
+    bool overBudget() const { return fCache->overBudget(); }
 
 private:
     sk_sp<GrGpuResource> findResourceByUniqueKey(const GrUniqueKey&);
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index 3e188b8..71ff637 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -42,8 +42,8 @@
     SkASSERT(!fLastOpList);
 }
 
-static bool attach_stencil_if_needed(GrResourceProvider* resourceProvider,
-                                     GrSurface* surface, bool needsStencil) {
+bool GrSurfaceProxyPriv::AttachStencilIfNeeded(GrResourceProvider* resourceProvider,
+                                               GrSurface* surface, bool needsStencil) {
     if (needsStencil) {
         GrRenderTarget* rt = surface->asRenderTarget();
         if (!rt) {
@@ -88,7 +88,7 @@
 
     surface->asTexture()->texturePriv().setMipColorMode(mipColorMode);
 
-    if (!attach_stencil_if_needed(resourceProvider, surface.get(), needsStencil)) {
+    if (!GrSurfaceProxyPriv::AttachStencilIfNeeded(resourceProvider, surface.get(), needsStencil)) {
         return nullptr;
     }
 
@@ -115,7 +115,7 @@
         if (uniqueKey) {
             SkASSERT(fTarget->getUniqueKey() == *uniqueKey);
         }
-        return attach_stencil_if_needed(resourceProvider, fTarget, needsStencil);
+        return GrSurfaceProxyPriv::AttachStencilIfNeeded(resourceProvider, fTarget, needsStencil);
     }
 
     sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, sampleCnt, needsStencil,
diff --git a/src/gpu/GrSurfaceProxyPriv.h b/src/gpu/GrSurfaceProxyPriv.h
index 1f953f4..a93a20c 100644
--- a/src/gpu/GrSurfaceProxyPriv.h
+++ b/src/gpu/GrSurfaceProxyPriv.h
@@ -68,6 +68,8 @@
     // Don't. Just don't.
     void exactify();
 
+    static bool AttachStencilIfNeeded(GrResourceProvider*, GrSurface*, bool needsStencil);
+
 private:
     explicit GrSurfaceProxyPriv(GrSurfaceProxy* proxy) : fProxy(proxy) {}
     GrSurfaceProxyPriv(const GrSurfaceProxyPriv&) {} // unimpl
diff --git a/tests/ResourceAllocatorTest.cpp b/tests/ResourceAllocatorTest.cpp
index bb4acf0..02785e6 100644
--- a/tests/ResourceAllocatorTest.cpp
+++ b/tests/ResourceAllocatorTest.cpp
@@ -73,8 +73,10 @@
 
     alloc.addInterval(p1.get(), 0, 4);
     alloc.addInterval(p2.get(), 1, 2);
+    alloc.markEndOfOpList(0);
 
-    alloc.assign();
+    int startIndex, stopIndex;
+    alloc.assign(&startIndex, &stopIndex);
 
     REPORTER_ASSERT(reporter, p1->priv().peekSurface());
     REPORTER_ASSERT(reporter, p2->priv().peekSurface());
@@ -91,8 +93,10 @@
 
     alloc.addInterval(p1.get(), 0, 2);
     alloc.addInterval(p2.get(), 3, 5);
+    alloc.markEndOfOpList(0);
 
-    alloc.assign();
+    int startIndex, stopIndex;
+    alloc.assign(&startIndex, &stopIndex);
 
     REPORTER_ASSERT(reporter, p1->priv().peekSurface());
     REPORTER_ASSERT(reporter, p2->priv().peekSurface());
@@ -139,7 +143,8 @@
     for (auto test : gOverlappingTests) {
         sk_sp<GrSurfaceProxy> p1 = make_deferred(resourceProvider, test.fP1);
         sk_sp<GrSurfaceProxy> p2 = make_deferred(resourceProvider, test.fP2);
-        overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2), test.fExpectation);
+        overlap_test(reporter, resourceProvider,
+                     std::move(p1), std::move(p2), test.fExpectation);
     }
 
     int k2 = ctxInfo.grContext()->caps()->getSampleCount(2, kRGBA);
@@ -180,8 +185,8 @@
         if (!p1 || !p2) {
             continue; // creation can fail (i.e., for msaa4 on iOS)
         }
-        non_overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2),
-                         test.fExpectation);
+        non_overlap_test(reporter, resourceProvider,
+                         std::move(p1), std::move(p2), test.fExpectation);
     }
 
     {
@@ -193,8 +198,8 @@
         GrBackendObject backEndObj;
         sk_sp<GrSurfaceProxy> p1 = make_backend(ctxInfo.grContext(), t[0].fP1, &backEndObj);
         sk_sp<GrSurfaceProxy> p2 = make_deferred(resourceProvider, t[0].fP2);
-        non_overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2),
-                         t[0].fExpectation);
+        non_overlap_test(reporter, resourceProvider,
+                         std::move(p1), std::move(p2), t[0].fExpectation);
         cleanup_backend(ctxInfo.grContext(), &backEndObj);
     }
 }