Reland "Merge GrOpList and GrRTOpList and rename to GrOpsTask."
This reverts commit f21bf9e50bb175eb151e90a01d7f8351da0802f8.
Reason for revert: relanding with an infrastructure fix.
Original change's description:
> Revert "Merge GrOpList and GrRTOpList and rename to GrOpsTask."
>
> This reverts commit 2a5954140b49d18e5161a30a4ae2c7ac28bc1993.
>
> Reason for revert: breaking everything
>
> Original change's description:
> > Merge GrOpList and GrRTOpList and rename to GrOpsTask.
> >
> > Change-Id: I8f4f2218a30fd0541a8f79f7bb9850f9500cd243
> > Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236343
> > Commit-Queue: Greg Daniel <egdaniel@google.com>
> > Reviewed-by: Brian Salomon <bsalomon@google.com>
>
> TBR=egdaniel@google.com,bsalomon@google.com,robertphillips@google.com
>
> Change-Id: I27840ea0343e8e6b388556afb7bd2e76386d611d
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236349
> Reviewed-by: Greg Daniel <egdaniel@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>
TBR=egdaniel@google.com,bsalomon@google.com,robertphillips@google.com
Change-Id: Ibd3a06e4a91dbb1f225dcc8d17d0db3967b6f85f
No-Presubmit: true
No-Tree-Checks: true
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236350
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index f73f5be..ecc9a42 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -9,7 +9,7 @@
#include "src/gpu/GrDeinstantiateProxyTracker.h"
#include "src/gpu/GrGpuResourcePriv.h"
-#include "src/gpu/GrOpList.h"
+#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrResourceProvider.h"
@@ -53,16 +53,17 @@
}
}
-void GrResourceAllocator::markEndOfOpList(int opListIndex) {
- SkASSERT(!fAssigned); // We shouldn't be adding any opLists after (or during) assignment
+void GrResourceAllocator::markEndOfOpsTask(int opsTaskIndex) {
+ SkASSERT(!fAssigned); // We shouldn't be adding any opsTasks after (or during) assignment
- SkASSERT(fEndOfOpListOpIndices.count() == opListIndex);
- if (!fEndOfOpListOpIndices.empty()) {
- SkASSERT(fEndOfOpListOpIndices.back() < this->curOp());
+ SkASSERT(fEndOfOpsTaskOpIndices.count() == opsTaskIndex);
+ if (!fEndOfOpsTaskOpIndices.empty()) {
+ SkASSERT(fEndOfOpsTaskOpIndices.back() < this->curOp());
}
- fEndOfOpListOpIndices.push_back(this->curOp()); // This is the first op index of the next opList
- SkASSERT(fEndOfOpListOpIndices.count() <= fNumOpLists);
+ // This is the first op index of the next opsTask
+ fEndOfOpsTaskOpIndices.push_back(this->curOp());
+ SkASSERT(fEndOfOpsTaskOpIndices.count() <= fNumOpsTasks);
}
GrResourceAllocator::~GrResourceAllocator() {
@@ -113,7 +114,7 @@
if (0 == start && 0 == end) {
// This interval is for the initial upload to a deferred proxy. Due to the vagaries
// of how deferred proxies are collected they can appear as uploads multiple times
- // in a single opLists' list and as uploads in several opLists.
+ // in a single opsTasks' list and as uploads in several opsTasks.
SkASSERT(0 == intvl->start());
} else if (isDirectDstRead) {
// Direct reads from the render target itself should occur w/in the existing
@@ -352,30 +353,30 @@
}
}
-bool GrResourceAllocator::onOpListBoundary() const {
+bool GrResourceAllocator::onOpsTaskBoundary() const {
if (fIntvlList.empty()) {
- SkASSERT(fCurOpListIndex+1 <= fNumOpLists);
- // Although technically on an opList boundary there is no need to force an
+ SkASSERT(fCurOpsTaskIndex+1 <= fNumOpsTasks);
+ // Although technically on an opsTask boundary there is no need to force an
// intermediate flush here
return false;
}
const Interval* tmp = fIntvlList.peekHead();
- return fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start();
+ return fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start();
}
void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
- *stopIndex = fCurOpListIndex+1;
+ *stopIndex = fCurOpsTaskIndex+1;
// This is interrupting the allocation of resources for this flush. We need to
// proactively clear the active interval list of any intervals that aren't
// guaranteed to survive the partial flush lest they become zombies (i.e.,
// holding a deleted surface proxy).
const Interval* tmp = fIntvlList.peekHead();
- SkASSERT(fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start());
+ SkASSERT(fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start());
- fCurOpListIndex++;
- SkASSERT(fCurOpListIndex < fNumOpLists);
+ fCurOpsTaskIndex++;
+ SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
this->expire(tmp->start());
}
@@ -385,28 +386,28 @@
*outError = fLazyInstantiationError ? AssignError::kFailedProxyInstantiation
: AssignError::kNoError;
- SkASSERT(fNumOpLists == fEndOfOpListOpIndices.count());
+ SkASSERT(fNumOpsTasks == fEndOfOpsTaskOpIndices.count());
fIntvlHash.reset(); // we don't need the interval hash anymore
- if (fCurOpListIndex >= fEndOfOpListOpIndices.count()) {
+ if (fCurOpsTaskIndex >= fEndOfOpsTaskOpIndices.count()) {
return false; // nothing to render
}
- *startIndex = fCurOpListIndex;
- *stopIndex = fEndOfOpListOpIndices.count();
+ *startIndex = fCurOpsTaskIndex;
+ *stopIndex = fEndOfOpsTaskOpIndices.count();
if (fIntvlList.empty()) {
- fCurOpListIndex = fEndOfOpListOpIndices.count();
+ fCurOpsTaskIndex = fEndOfOpsTaskOpIndices.count();
return true; // no resources to assign
}
#if GR_ALLOCATION_SPEW
- SkDebugf("assigning opLists %d through %d out of %d numOpLists\n",
- *startIndex, *stopIndex, fNumOpLists);
- SkDebugf("EndOfOpListIndices: ");
- for (int i = 0; i < fEndOfOpListOpIndices.count(); ++i) {
- SkDebugf("%d ", fEndOfOpListOpIndices[i]);
+ SkDebugf("assigning opsTasks %d through %d out of %d numOpsTasks\n",
+ *startIndex, *stopIndex, fNumOpsTasks);
+ SkDebugf("EndOfOpsTaskIndices: ");
+ for (int i = 0; i < fEndOfOpsTaskOpIndices.count(); ++i) {
+ SkDebugf("%d ", fEndOfOpsTaskOpIndices[i]);
}
SkDebugf("\n");
#endif
@@ -417,9 +418,9 @@
this->dumpIntervals();
#endif
while (Interval* cur = fIntvlList.popHead()) {
- if (fEndOfOpListOpIndices[fCurOpListIndex] <= cur->start()) {
- fCurOpListIndex++;
- SkASSERT(fCurOpListIndex < fNumOpLists);
+ if (fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= cur->start()) {
+ fCurOpsTaskIndex++;
+ SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
}
this->expire(cur->start());
@@ -437,8 +438,8 @@
fActiveIntvls.insertByIncreasingEnd(cur);
if (fResourceProvider->overBudget()) {
- // Only force intermediate draws on opList boundaries
- if (this->onOpListBoundary()) {
+ // Only force intermediate draws on opsTask boundaries
+ if (this->onOpsTaskBoundary()) {
this->forceIntermediateFlush(stopIndex);
return true;
}
@@ -484,8 +485,8 @@
fActiveIntvls.insertByIncreasingEnd(cur);
if (fResourceProvider->overBudget()) {
- // Only force intermediate draws on opList boundaries
- if (this->onOpListBoundary()) {
+ // Only force intermediate draws on opsTask boundaries
+ if (this->onOpsTaskBoundary()) {
this->forceIntermediateFlush(stopIndex);
return true;
}