Reland "Merge GrOpList and GrRTOpList and rename to GrOpsTask."
This reverts commit f21bf9e50bb175eb151e90a01d7f8351da0802f8.
Reason for revert: relanding with infra fix
Original change's description:
> Revert "Merge GrOpList and GrRTOpList and rename to GrOpsTask."
>
> This reverts commit 2a5954140b49d18e5161a30a4ae2c7ac28bc1993.
>
> Reason for revert: breaking everything
>
> Original change's description:
> > Merge GrOpList and GrRTOpList and rename to GrOpsTask.
> >
> > Change-Id: I8f4f2218a30fd0541a8f79f7bb9850f9500cd243
> > Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236343
> > Commit-Queue: Greg Daniel <egdaniel@google.com>
> > Reviewed-by: Brian Salomon <bsalomon@google.com>
>
> TBR=egdaniel@google.com,bsalomon@google.com,robertphillips@google.com
>
> Change-Id: I27840ea0343e8e6b388556afb7bd2e76386d611d
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236349
> Reviewed-by: Greg Daniel <egdaniel@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>
TBR=egdaniel@google.com,bsalomon@google.com,robertphillips@google.com
Change-Id: Ibd3a06e4a91dbb1f225dcc8d17d0db3967b6f85f
No-Presubmit: true
No-Tree-Checks: true
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236350
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
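For orientation, a minimal before/after sketch of the rename at a typical call site (illustrative only, not part of the patch; renderTargetContext is assumed to be a GrRenderTargetContext*):

    // Old API (GrRenderTargetOpList, via getOpList()):
    uint32_t opListID = renderTargetContext->getOpList()->uniqueID();
    // New API (GrOpsTask), as used in GrClipStackClip.cpp below:
    uint32_t opsTaskID = renderTargetContext->getOpsTask()->uniqueID();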
diff --git a/src/core/SkDeferredDisplayList.cpp b/src/core/SkDeferredDisplayList.cpp
index 7d2b971..c89cd99 100644
--- a/src/core/SkDeferredDisplayList.cpp
+++ b/src/core/SkDeferredDisplayList.cpp
@@ -13,7 +13,7 @@
#if SK_SUPPORT_GPU
#include "src/gpu/GrRenderTask.h"
-#include "src/gpu/ccpr/GrCCPerOpListPaths.h"
+#include "src/gpu/ccpr/GrCCPerOpsTaskPaths.h"
#endif
SkDeferredDisplayList::SkDeferredDisplayList(const SkSurfaceCharacterization& characterization,
diff --git a/src/core/SkTTopoSort.h b/src/core/SkTTopoSort.h
index 9df95ee..bc1b07e 100644
--- a/src/core/SkTTopoSort.h
+++ b/src/core/SkTTopoSort.h
@@ -77,7 +77,7 @@
//
// TODO: potentially add a version that takes a seed node and just outputs that
// node and all the nodes on which it depends. This could be used to partially
-// flush a GrOpList DAG.
+// flush a GrRenderTask DAG.
template <typename T, typename Traits = T>
bool SkTTopoSort(SkTArray<sk_sp<T>>* graph) {
SkTArray<sk_sp<T>> result;
diff --git a/src/gpu/GrAuditTrail.cpp b/src/gpu/GrAuditTrail.cpp
index 609fe84..9c72acf 100644
--- a/src/gpu/GrAuditTrail.cpp
+++ b/src/gpu/GrAuditTrail.cpp
@@ -18,7 +18,7 @@
auditOp->fName = op->name();
auditOp->fBounds = op->bounds();
auditOp->fClientID = kGrAuditTrailInvalidID;
- auditOp->fOpListID = kGrAuditTrailInvalidID;
+ auditOp->fOpsTaskID = kGrAuditTrailInvalidID;
auditOp->fChildID = kGrAuditTrailInvalidID;
// consume the current stack trace if any
@@ -40,15 +40,15 @@
}
// Our algorithm doesn't bother to reorder inside of an OpNode so the ChildID will start at 0
- auditOp->fOpListID = fOpList.count();
+ auditOp->fOpsTaskID = fOpsTask.count();
auditOp->fChildID = 0;
// We use the op pointer as a key to find the OpNode we are 'glomming' ops onto
- fIDLookup.set(op->uniqueID(), auditOp->fOpListID);
+ fIDLookup.set(op->uniqueID(), auditOp->fOpsTaskID);
OpNode* opNode = new OpNode(proxyID);
opNode->fBounds = op->bounds();
opNode->fChildren.push_back(auditOp);
- fOpList.emplace_back(opNode);
+ fOpsTask.emplace_back(opNode);
}
void GrAuditTrail::opsCombined(const GrOp* consumer, const GrOp* consumed) {
@@ -56,22 +56,22 @@
int* indexPtr = fIDLookup.find(consumer->uniqueID());
SkASSERT(indexPtr);
int index = *indexPtr;
- SkASSERT(index < fOpList.count() && fOpList[index]);
- OpNode& consumerOp = *fOpList[index];
+ SkASSERT(index < fOpsTask.count() && fOpsTask[index]);
+ OpNode& consumerOp = *fOpsTask[index];
// Look up the op which will be glommed
int* consumedPtr = fIDLookup.find(consumed->uniqueID());
SkASSERT(consumedPtr);
int consumedIndex = *consumedPtr;
- SkASSERT(consumedIndex < fOpList.count() && fOpList[consumedIndex]);
- OpNode& consumedOp = *fOpList[consumedIndex];
+ SkASSERT(consumedIndex < fOpsTask.count() && fOpsTask[consumedIndex]);
+ OpNode& consumedOp = *fOpsTask[consumedIndex];
// steal all of consumed's ops
for (int i = 0; i < consumedOp.fChildren.count(); i++) {
Op* childOp = consumedOp.fChildren[i];
// set the ids for the child op
- childOp->fOpListID = index;
+ childOp->fOpsTaskID = index;
childOp->fChildID = consumerOp.fChildren.count();
consumerOp.fChildren.push_back(childOp);
}
@@ -79,15 +79,15 @@
// Update the bounds for the combineWith node
consumerOp.fBounds = consumer->bounds();
- // remove the old node from our opList and clear the combinee's lookup
+ // remove the old node from our opsTask and clear the combinee's lookup
// NOTE: because we can't change the shape of the oplist, we use a sentinel
- fOpList[consumedIndex].reset(nullptr);
+ fOpsTask[consumedIndex].reset(nullptr);
fIDLookup.remove(consumed->uniqueID());
}
-void GrAuditTrail::copyOutFromOpList(OpInfo* outOpInfo, int opListID) {
- SkASSERT(opListID < fOpList.count());
- const OpNode* bn = fOpList[opListID].get();
+void GrAuditTrail::copyOutFromOpsTask(OpInfo* outOpInfo, int opsTaskID) {
+ SkASSERT(opsTaskID < fOpsTask.count());
+ const OpNode* bn = fOpsTask[opsTaskID].get();
SkASSERT(bn);
outOpInfo->fBounds = bn->fBounds;
outOpInfo->fProxyUniqueID = bn->fProxyUniqueID;
@@ -105,30 +105,30 @@
// We track which oplistID we're currently looking at. If it changes, then we need to push
// back a new op info struct. We happen to know that ops are in sequential order in the
// oplist, otherwise we'd have to do more bookkeeping
- int currentOpListID = kGrAuditTrailInvalidID;
+ int currentOpsTaskID = kGrAuditTrailInvalidID;
for (int i = 0; i < (*opsLookup)->count(); i++) {
const Op* op = (**opsLookup)[i];
// Because we will copy out all of the ops associated with a given op list id everytime
// the id changes, we only have to update our struct when the id changes.
- if (kGrAuditTrailInvalidID == currentOpListID || op->fOpListID != currentOpListID) {
+ if (kGrAuditTrailInvalidID == currentOpsTaskID || op->fOpsTaskID != currentOpsTaskID) {
OpInfo& outOpInfo = outInfo->push_back();
// copy out all of the ops so the client can display them even if they have a
// different clientID
- this->copyOutFromOpList(&outOpInfo, op->fOpListID);
+ this->copyOutFromOpsTask(&outOpInfo, op->fOpsTaskID);
}
}
}
}
-void GrAuditTrail::getBoundsByOpListID(OpInfo* outInfo, int opListID) {
- this->copyOutFromOpList(outInfo, opListID);
+void GrAuditTrail::getBoundsByOpsTaskID(OpInfo* outInfo, int opsTaskID) {
+ this->copyOutFromOpsTask(outInfo, opsTaskID);
}
void GrAuditTrail::fullReset() {
SkASSERT(fEnabled);
- fOpList.reset();
+ fOpsTask.reset();
fIDLookup.reset();
// free all client ops
fClientIDLookup.foreach ([](const int&, Ops** ops) { delete *ops; });
@@ -152,7 +152,7 @@
void GrAuditTrail::toJson(SkJSONWriter& writer) const {
writer.beginObject();
- JsonifyTArray(writer, "Ops", fOpList);
+ JsonifyTArray(writer, "Ops", fOpsTask);
writer.endObject();
}
@@ -178,7 +178,7 @@
writer.beginObject();
writer.appendString("Name", fName.c_str());
writer.appendS32("ClientID", fClientID);
- writer.appendS32("OpListID", fOpListID);
+ writer.appendS32("OpsTaskID", fOpsTaskID);
writer.appendS32("ChildID", fChildID);
skrect_to_json(writer, "Bounds", fBounds);
if (fStackTrace.count()) {
diff --git a/src/gpu/GrAuditTrail.h b/src/gpu/GrAuditTrail.h
index 74f47d3..dd6549e 100644
--- a/src/gpu/GrAuditTrail.h
+++ b/src/gpu/GrAuditTrail.h
@@ -51,12 +51,12 @@
GrAuditTrail* fAuditTrail;
};
- class AutoManageOpList {
+ class AutoManageOpsTask {
public:
- AutoManageOpList(GrAuditTrail* auditTrail)
+ AutoManageOpsTask(GrAuditTrail* auditTrail)
: fAutoEnable(auditTrail), fAuditTrail(auditTrail) {}
- ~AutoManageOpList() { fAuditTrail->fullReset(); }
+ ~AutoManageOpsTask() { fAuditTrail->fullReset(); }
private:
AutoEnable fAutoEnable;
@@ -116,7 +116,7 @@
};
void getBoundsByClientID(SkTArray<OpInfo>* outInfo, int clientID);
- void getBoundsByOpListID(OpInfo* outInfo, int opListID);
+ void getBoundsByOpsTaskID(OpInfo* outInfo, int opsTaskID);
void fullReset();
@@ -130,7 +130,7 @@
SkTArray<SkString> fStackTrace;
SkRect fBounds;
int fClientID;
- int fOpListID;
+ int fOpsTaskID;
int fChildID;
};
typedef SkTArray<std::unique_ptr<Op>, true> OpPool;
@@ -145,9 +145,9 @@
Ops fChildren;
const GrSurfaceProxy::UniqueID fProxyUniqueID;
};
- typedef SkTArray<std::unique_ptr<OpNode>, true> OpList;
+ typedef SkTArray<std::unique_ptr<OpNode>, true> OpsTask;
- void copyOutFromOpList(OpInfo* outOpInfo, int opListID);
+ void copyOutFromOpsTask(OpInfo* outOpInfo, int opsTaskID);
template <typename T>
static void JsonifyTArray(SkJSONWriter& writer, const char* name, const T& array);
@@ -155,7 +155,7 @@
OpPool fOpPool;
SkTHashMap<uint32_t, int> fIDLookup;
SkTHashMap<int, Ops*> fClientIDLookup;
- OpList fOpList;
+ OpsTask fOpsTask;
SkTArray<SkString> fCurrentStackTrace;
// The client can pass in an optional client ID which we will use to mark the ops
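A minimal usage sketch of the renamed RAII helper (assuming a valid GrAuditTrail* named auditTrail is in scope; not part of the patch):

    {
        GrAuditTrail::AutoManageOpsTask manage(auditTrail);
        // Ops recorded and combined while this is alive get fOpsTaskID/fChildID entries.
    }   // ~AutoManageOpsTask() calls fullReset(), clearing fOpsTask and fIDLookup.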
diff --git a/src/gpu/GrClipStackClip.cpp b/src/gpu/GrClipStackClip.cpp
index 07470ce..5d862d4 100644
--- a/src/gpu/GrClipStackClip.cpp
+++ b/src/gpu/GrClipStackClip.cpp
@@ -245,10 +245,10 @@
}
}
- // The opList ID must not be looked up until AFTER producing the clip mask (if any). That step
- // can cause a flush or otherwise change which opList our draw is going into.
- uint32_t opListID = renderTargetContext->getOpList()->uniqueID();
- if (auto clipFPs = reducedClip.finishAndDetachAnalyticFPs(ccpr, opListID)) {
+ // The opsTask ID must not be looked up until AFTER producing the clip mask (if any). That step
+ // can cause a flush or otherwise change which opsTask our draw is going into.
+ uint32_t opsTaskID = renderTargetContext->getOpsTask()->uniqueID();
+ if (auto clipFPs = reducedClip.finishAndDetachAnalyticFPs(ccpr, opsTaskID)) {
out->addCoverageFP(std::move(clipFPs));
}
diff --git a/src/gpu/GrCopyRenderTask.cpp b/src/gpu/GrCopyRenderTask.cpp
index 6c41370..34af9a1 100644
--- a/src/gpu/GrCopyRenderTask.cpp
+++ b/src/gpu/GrCopyRenderTask.cpp
@@ -54,7 +54,7 @@
void GrCopyRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
// This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
- // fEndOfOpListOpIndices will remain in sync), so we create a fake op# to capture the fact that
+ // fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
// we read fSrcProxy and copy to fTarget.
alloc->addInterval(fSrcProxy.get(), alloc->curOp(), alloc->curOp(),
GrResourceAllocator::ActualUse::kYes);
diff --git a/src/gpu/GrDDLContext.cpp b/src/gpu/GrDDLContext.cpp
index 4cfdad4..4d14504 100644
--- a/src/gpu/GrDDLContext.cpp
+++ b/src/gpu/GrDDLContext.cpp
@@ -52,7 +52,7 @@
return false;
}
- // DDL contexts/drawing managers always sort the oplists and attempt to reduce opList
+ // DDL contexts/drawing managers always sort the opsTasks and attempt to reduce opsTask
// splitting.
this->setupDrawingManager(true, true);
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 3a5e750..75abb7f 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -95,13 +95,13 @@
return (fRenderTasks[fRenderTasks.count() - 2] = std::move(renderTask)).get();
}
-void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& opLists) {
- fRenderTasks.push_back_n(opLists.count(), opLists.begin());
+void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& renderTasks) {
+ fRenderTasks.push_back_n(renderTasks.count(), renderTasks.begin());
}
-void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* opLists) {
- SkASSERT(opLists->empty());
- opLists->swap(fRenderTasks);
+void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks) {
+ SkASSERT(renderTasks->empty());
+ renderTasks->swap(fRenderTasks);
}
void GrDrawingManager::RenderTaskDAG::prepForFlush() {
@@ -112,18 +112,18 @@
}
#ifdef SK_DEBUG
- // This block checks for any unnecessary splits in the opLists. If two sequential opLists
- // share the same backing GrSurfaceProxy it means the opList was artificially split.
+ // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
+ // share the same backing GrSurfaceProxy it means the opsTask was artificially split.
if (fRenderTasks.count()) {
- GrRenderTargetOpList* prevOpList = fRenderTasks[0]->asRenderTargetOpList();
+ GrOpsTask* prevOpsTask = fRenderTasks[0]->asOpsTask();
for (int i = 1; i < fRenderTasks.count(); ++i) {
- GrRenderTargetOpList* curOpList = fRenderTasks[i]->asRenderTargetOpList();
+ GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask();
- if (prevOpList && curOpList) {
- SkASSERT(prevOpList->fTarget.get() != curOpList->fTarget.get());
+ if (prevOpsTask && curOpsTask) {
+ SkASSERT(prevOpsTask->fTarget.get() != curOpsTask->fTarget.get());
}
- prevOpList = curOpList;
+ prevOpsTask = curOpsTask;
}
}
#endif
@@ -146,7 +146,7 @@
// no renderTask should receive a dependency
fRenderTasks[i]->makeClosed(*caps);
- // We shouldn't need to do this, but it turns out some clients still hold onto opLists
+ // We shouldn't need to do this, but it turns out some clients still hold onto opsTasks
// after a cleanup.
// MDB TODO: is this still true?
if (!fRenderTasks[i]->unique()) {
@@ -164,7 +164,7 @@
const GrPathRendererChain::Options& optionsForPathRendererChain,
const GrTextContext::Options& optionsForTextContext,
bool sortRenderTasks,
- bool reduceOpListSplitting)
+ bool reduceOpsTaskSplitting)
: fContext(context)
, fOptionsForPathRendererChain(optionsForPathRendererChain)
, fOptionsForTextContext(optionsForTextContext)
@@ -173,7 +173,7 @@
, fPathRendererChain(nullptr)
, fSoftwarePathRenderer(nullptr)
, fFlushing(false)
- , fReduceOpListSplitting(reduceOpListSplitting) {
+ , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) {
}
void GrDrawingManager::cleanup() {
@@ -256,11 +256,11 @@
auto resourceCache = direct->priv().getResourceCache();
// Semi-usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs
- // to flush mid-draw. In that case, the SkGpuDevice's opLists won't be closed but need to be
- // flushed anyway. Closing such opLists here will mean new ones will be created to replace them
+ // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be
+ // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them
// if the SkGpuDevice(s) write to them again.
fDAG.closeAll(fContext->priv().caps());
- fActiveOpList = nullptr;
+ fActiveOpsTask = nullptr;
fDAG.prepForFlush();
if (!fCpuBufferCache) {
@@ -286,21 +286,21 @@
onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
fFlushingRenderTaskIDs.count(), &renderTargetContexts);
for (const auto& rtc : renderTargetContexts) {
- sk_sp<GrRenderTargetOpList> onFlushOpList = sk_ref_sp(rtc->getRTOpList());
- if (!onFlushOpList) {
+ sk_sp<GrOpsTask> onFlushOpsTask = sk_ref_sp(rtc->getOpsTask());
+ if (!onFlushOpsTask) {
continue; // Odd - but not a big deal
}
#ifdef SK_DEBUG
// OnFlush callbacks are already invoked during flush, and are therefore expected to
// handle resource allocation & usage on their own. (No deferred or lazy proxies!)
- onFlushOpList->visitProxies_debugOnly([](GrSurfaceProxy* p, GrMipMapped) {
+ onFlushOpsTask->visitProxies_debugOnly([](GrSurfaceProxy* p, GrMipMapped) {
SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
SkASSERT(GrSurfaceProxy::LazyState::kNot == p->lazyInstantiationState());
});
#endif
- onFlushOpList->makeClosed(*fContext->priv().caps());
- onFlushOpList->prepare(&flushState);
- fOnFlushCBOpLists.push_back(std::move(onFlushOpList));
+ onFlushOpsTask->makeClosed(*fContext->priv().caps());
+ onFlushOpsTask->prepare(&flushState);
+ fOnFlushCBOpsTasks.push_back(std::move(onFlushOpsTask));
}
renderTargetContexts.reset();
}
@@ -323,7 +323,7 @@
if (fDAG.renderTask(i)) {
fDAG.renderTask(i)->gatherProxyIntervals(&alloc);
}
- alloc.markEndOfOpList(i);
+ alloc.markEndOfOpsTask(i);
}
alloc.determineRecyclability();
@@ -354,7 +354,7 @@
#ifdef SK_DEBUG
for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
- // If there are any remaining opLists at this point, make sure they will not survive the
+ // If there are any remaining opsTasks at this point, make sure they will not survive the
// flush. Otherwise we need to call endFlush() on them.
// http://skbug.com/7111
SkASSERT(!fDAG.renderTask(i) || fDAG.renderTask(i)->unique());
@@ -366,7 +366,7 @@
#ifdef SK_DEBUG
// In non-DDL mode this checks that all the flushed ops have been freed from the memory pool.
// When we move to partial flushes this assert will no longer be valid.
- // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opLists
+ // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opsTasks
// will be stored in the DDL's GrOpMemoryPools.
GrOpMemoryPool* opMemoryPool = fContext->priv().opMemoryPool();
opMemoryPool->isEmpty();
@@ -401,7 +401,7 @@
SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks());
#if GR_FLUSH_TIME_OP_SPEW
- SkDebugf("Flushing opLists: %d to %d out of [%d, %d]\n",
+ SkDebugf("Flushing opsTasks: %d to %d out of [%d, %d]\n",
startIndex, stopIndex, 0, fDAG.numRenderTasks());
for (int i = startIndex; i < stopIndex; ++i) {
if (fDAG.renderTask(i)) {
@@ -435,12 +435,12 @@
static constexpr int kMaxRenderTasksBeforeFlush = 100;
// Execute the onFlush op lists first, if any.
- for (sk_sp<GrOpList>& onFlushOpList : fOnFlushCBOpLists) {
- if (!onFlushOpList->execute(flushState)) {
- SkDebugf("WARNING: onFlushOpList failed to execute.\n");
+ for (sk_sp<GrOpsTask>& onFlushOpsTask : fOnFlushCBOpsTasks) {
+ if (!onFlushOpsTask->execute(flushState)) {
+ SkDebugf("WARNING: onFlushOpsTask failed to execute.\n");
}
- SkASSERT(onFlushOpList->unique());
- onFlushOpList = nullptr;
+ SkASSERT(onFlushOpsTask->unique());
+ onFlushOpsTask = nullptr;
(*numRenderTasksExecuted)++;
if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
flushState->gpu()->finishFlush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess,
@@ -448,7 +448,7 @@
*numRenderTasksExecuted = 0;
}
}
- fOnFlushCBOpLists.reset();
+ fOnFlushCBOpsTasks.reset();
// Execute the normal op lists.
for (int i = startIndex; i < stopIndex; ++i) {
@@ -547,7 +547,7 @@
// no renderTask should receive a new command after this
fDAG.closeAll(fContext->priv().caps());
- fActiveOpList = nullptr;
+ fActiveOpsTask = nullptr;
fDAG.swap(&ddl->fRenderTasks);
@@ -564,19 +564,19 @@
GrRenderTargetProxy* newDest) {
SkDEBUGCODE(this->validate());
- if (fActiveOpList) {
+ if (fActiveOpsTask) {
// This is a temporary fix for the partial-MDB world. In that world we're not
- // reordering so ops that (in the single opList world) would've just glommed onto the
- // end of the single opList but referred to a far earlier RT need to appear in their
- // own opList.
- fActiveOpList->makeClosed(*fContext->priv().caps());
- fActiveOpList = nullptr;
+ // reordering so ops that (in the single opsTask world) would've just glommed onto the
+ // end of the single opsTask but referred to a far earlier RT need to appear in their
+ // own opsTask.
+ fActiveOpsTask->makeClosed(*fContext->priv().caps());
+ fActiveOpsTask = nullptr;
}
this->addDDLTarget(newDest);
// Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
- // The lazy proxy that references it (in the copied opLists) will steal its GrTexture.
+ // The lazy proxy that references it (in the copied opsTasks) will steal its GrTexture.
ddl->fLazyProxyData->fReplayDest = newDest;
if (ddl->fPendingPaths.size()) {
@@ -592,30 +592,30 @@
#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
- if (fDAG.sortingRenderTasks() && fReduceOpListSplitting) {
- SkASSERT(!fActiveOpList);
+ if (fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
+ SkASSERT(!fActiveOpsTask);
} else {
- if (fActiveOpList) {
+ if (fActiveOpsTask) {
SkASSERT(!fDAG.empty());
- SkASSERT(!fActiveOpList->isClosed());
- SkASSERT(fActiveOpList == fDAG.back());
+ SkASSERT(!fActiveOpsTask->isClosed());
+ SkASSERT(fActiveOpsTask == fDAG.back());
}
for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
- if (fActiveOpList != fDAG.renderTask(i)) {
+ if (fActiveOpsTask != fDAG.renderTask(i)) {
SkASSERT(fDAG.renderTask(i)->isClosed());
}
}
if (!fDAG.empty() && !fDAG.back()->isClosed()) {
- SkASSERT(fActiveOpList == fDAG.back());
+ SkASSERT(fActiveOpsTask == fDAG.back());
}
}
}
#endif
void GrDrawingManager::closeRenderTasksForNewRenderTask(GrSurfaceProxy* target) {
- if (target && fDAG.sortingRenderTasks() && fReduceOpListSplitting) {
+ if (target && fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
// In this case we need to close all the renderTasks that rely on the current contents of
// 'target'. That is bc we're going to update the content of the proxy so they need to be
// split in case they use both the old and new content. (This is a bit of an overkill: they
@@ -624,46 +624,43 @@
if (GrRenderTask* lastRenderTask = target->getLastRenderTask()) {
lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps());
}
- } else if (fActiveOpList) {
+ } else if (fActiveOpsTask) {
// This is a temporary fix for the partial-MDB world. In that world we're not
- // reordering so ops that (in the single opList world) would've just glommed onto the
- // end of the single opList but referred to a far earlier RT need to appear in their
- // own opList.
- fActiveOpList->makeClosed(*fContext->priv().caps());
- fActiveOpList = nullptr;
+ // reordering so ops that (in the single opsTask world) would've just glommed onto the
+ // end of the single opsTask but referred to a far earlier RT need to appear in their
+ // own opsTask.
+ fActiveOpsTask->makeClosed(*fContext->priv().caps());
+ fActiveOpsTask = nullptr;
}
}
-sk_sp<GrRenderTargetOpList> GrDrawingManager::newRTOpList(sk_sp<GrRenderTargetProxy> rtp,
- bool managedOpList) {
+sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(sk_sp<GrRenderTargetProxy> rtp, bool managedOpsTask) {
SkDEBUGCODE(this->validate());
SkASSERT(fContext);
this->closeRenderTasksForNewRenderTask(rtp.get());
- sk_sp<GrRenderTargetOpList> opList(new GrRenderTargetOpList(
- fContext->priv().refOpMemoryPool(),
- rtp,
- fContext->priv().auditTrail()));
- SkASSERT(rtp->getLastRenderTask() == opList.get());
+ sk_sp<GrOpsTask> opsTask(new GrOpsTask(fContext->priv().refOpMemoryPool(), rtp,
+ fContext->priv().auditTrail()));
+ SkASSERT(rtp->getLastRenderTask() == opsTask.get());
- if (managedOpList) {
- fDAG.add(opList);
+ if (managedOpsTask) {
+ fDAG.add(opsTask);
- if (!fDAG.sortingRenderTasks() || !fReduceOpListSplitting) {
- fActiveOpList = opList.get();
+ if (!fDAG.sortingRenderTasks() || !fReduceOpsTaskSplitting) {
+ fActiveOpsTask = opsTask.get();
}
}
SkDEBUGCODE(this->validate());
- return opList;
+ return opsTask;
}
GrRenderTask* GrDrawingManager::newTextureResolveRenderTask(
sk_sp<GrTextureProxy> textureProxy, GrTextureResolveFlags flags, const GrCaps& caps) {
- // Unlike in the "new opList" cases, we do not want to close the active opList, nor (if we are
- // in sorting and opList reduction mode) the render tasks that depend on the proxy's current
- // state. This is because those opLists can still receive new ops and because if they refer to
+ // Unlike in the "new opsTask" cases, we do not want to close the active opsTask, nor (if we are
+ // in sorting and opsTask reduction mode) the render tasks that depend on the proxy's current
+ // state. This is because those opsTasks can still receive new ops and because if they refer to
// the mipmapped version of 'textureProxy', they will then come to depend on the render task
// being created here.
// NOTE: In either case, 'textureProxy' should already be closed at this point (i.e., its state
@@ -674,8 +671,8 @@
SkASSERT(!previousTaskBeforeMipsResolve || previousTaskBeforeMipsResolve->isClosed());
SkASSERT(textureProxy->getLastRenderTask() == textureResolveTask.get());
- // Add the new textureResolveTask before the fActiveOpList (if not in
- // sorting/opList-splitting-reduction mode) because it will depend upon this resolve task.
+ // Add the new textureResolveTask before the fActiveOpsTask (if not in
+ // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
// NOTE: Putting it here will also reduce the amount of work required by the topological sort.
return fDAG.addBeforeLast(std::move(textureResolveTask));
}
@@ -705,7 +702,7 @@
fDAG.add(std::move(task));
// We have closed the previous active oplist but since a new oplist isn't being added there
// shouldn't be an active one.
- SkASSERT(!fActiveOpList);
+ SkASSERT(!fActiveOpsTask);
SkDEBUGCODE(this->validate());
}
@@ -732,7 +729,7 @@
fDAG.add(std::move(task));
// We have closed the previous active oplist but since a new oplist isn't being added there
// shouldn't be an active one.
- SkASSERT(!fActiveOpList);
+ SkASSERT(!fActiveOpsTask);
SkDEBUGCODE(this->validate());
return true;
}
@@ -806,7 +803,7 @@
GrColorType colorType,
sk_sp<SkColorSpace> colorSpace,
const SkSurfaceProps* surfaceProps,
- bool managedOpList) {
+ bool managedOpsTask) {
if (this->wasAbandoned() || !sProxy->asRenderTargetProxy()) {
return nullptr;
}
@@ -826,7 +823,7 @@
colorType,
std::move(colorSpace),
surfaceProps,
- managedOpList));
+ managedOpsTask));
}
std::unique_ptr<GrTextureContext> GrDrawingManager::makeTextureContext(
diff --git a/src/gpu/GrDrawingManager.h b/src/gpu/GrDrawingManager.h
index 6b5fc1d..25b0343 100644
--- a/src/gpu/GrDrawingManager.h
+++ b/src/gpu/GrDrawingManager.h
@@ -21,11 +21,10 @@
class GrCoverageCountingPathRenderer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
-class GrOpList;
+class GrOpsTask;
class GrRecordingContext;
class GrRenderTargetContext;
class GrRenderTargetProxy;
-class GrRenderTargetOpList;
class GrSoftwarePathRenderer;
class GrTextureContext;
class SkDeferredDisplayList;
@@ -40,15 +39,15 @@
GrColorType,
sk_sp<SkColorSpace>,
const SkSurfaceProps*,
- bool managedOpList = true);
+ bool managedOpsTask = true);
std::unique_ptr<GrTextureContext> makeTextureContext(sk_sp<GrSurfaceProxy>,
GrColorType,
SkAlphaType,
sk_sp<SkColorSpace>);
- // A managed opList is controlled by the drawing manager (i.e., sorted & flushed with the
+ // A managed opsTask is controlled by the drawing manager (i.e., sorted & flushed with the
// others). An unmanaged one is created and used by the onFlushCallback.
- sk_sp<GrRenderTargetOpList> newRTOpList(sk_sp<GrRenderTargetProxy>, bool managedOpList);
+ sk_sp<GrOpsTask> newOpsTask(sk_sp<GrRenderTargetProxy>, bool managedOpsTask);
// Create a new, specialized, render task that will regenerate mipmap levels and/or resolve
// MSAA (depending on GrTextureResolveFlags). This method will add the new render task to the
@@ -122,8 +121,8 @@
~RenderTaskDAG();
// Currently, when explicitly allocating resources, this call will topologically sort the
- // opLists.
- // MDB TODO: remove once incremental opList sorting is enabled
+ // GrRenderTasks.
+ // MDB TODO: remove once incremental GrRenderTask sorting is enabled
void prepForFlush();
void closeAll(const GrCaps* caps);
@@ -135,10 +134,10 @@
void reset();
- // These calls forceably remove an opList from the DAG. They are problematic bc they just
- // remove the opList but don't cleanup any refering pointers (i.e., dependency pointers
- // in the DAG). They work right now bc they are only called at flush time, after the
- // topological sort is complete (so the dangling pointers aren't used).
+ // These calls forceably remove a GrRenderTask from the DAG. They are problematic bc they
+ // just remove the GrRenderTask but don't clean up any referring pointers (i.e., dependency
+ // pointers in the DAG). They work right now bc they are only called at flush time, after
+ // the topological sort is complete (so the dangling pointers aren't used).
void removeRenderTask(int index);
void removeRenderTasks(int startIndex, int stopIndex);
@@ -169,18 +168,18 @@
GrDrawingManager(GrRecordingContext*, const GrPathRendererChain::Options&,
const GrTextContext::Options&,
bool sortRenderTasks,
- bool reduceOpListSplitting);
+ bool reduceOpsTaskSplitting);
bool wasAbandoned() const;
void cleanup();
- // Closes the target's dependent render tasks (or, if not in sorting/opList-splitting-reduction
- // mode, closes fActiveOpList) in preparation for us opening a new opList that will write to
+ // Closes the target's dependent render tasks (or, if not in sorting/opsTask-splitting-reduction
+ // mode, closes fActiveOpsTask) in preparation for us opening a new opsTask that will write to
// 'target'.
void closeRenderTasksForNewRenderTask(GrSurfaceProxy* target);
- // return true if any opLists were actually executed; false otherwise
+ // return true if any GrRenderTasks were actually executed; false otherwise
bool executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState*,
int* numRenderTasksExecuted);
@@ -209,11 +208,11 @@
sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;
RenderTaskDAG fDAG;
- GrOpList* fActiveOpList = nullptr;
- // These are the IDs of the opLists currently being flushed (in internalFlush)
+ GrOpsTask* fActiveOpsTask = nullptr;
+ // These are the IDs of the opsTasks currently being flushed (in internalFlush)
SkSTArray<8, uint32_t, true> fFlushingRenderTaskIDs;
- // These are the new opLists generated by the onFlush CBs
- SkSTArray<8, sk_sp<GrOpList>> fOnFlushCBOpLists;
+ // These are the new opsTasks generated by the onFlush CBs
+ SkSTArray<8, sk_sp<GrOpsTask>> fOnFlushCBOpsTasks;
std::unique_ptr<GrTextContext> fTextContext;
@@ -222,7 +221,7 @@
GrTokenTracker fTokenTracker;
bool fFlushing;
- bool fReduceOpListSplitting;
+ bool fReduceOpsTaskSplitting;
SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index 84d9c41..71e1557 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -289,7 +289,7 @@
GrGpuBuffer* transferBuffer, size_t offset);
// Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
- // take place at the GrOpList level and this function implement faster copy paths. The rect
+ // take place at higher levels and this function implements faster copy paths. The rect
// and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
// src/dst bounds and non-empty. They must also be in their exact device space coords, including
// already being transformed for origin if need be. If canDiscardOutsideDstRect is set to true
@@ -310,14 +310,14 @@
return fSamplePatternDictionary.retrieveSampleLocations(samplePatternKey);
}
- // Returns a GrGpuRTCommandBuffer which GrOpLists send draw commands to instead of directly
+ // Returns a GrGpuRTCommandBuffer which GrOpsTasks send draw commands to instead of directly
// to the Gpu object. The 'bounds' rect is the content rect of the destination.
virtual GrGpuRTCommandBuffer* getCommandBuffer(
GrRenderTarget*, GrSurfaceOrigin, const SkRect& bounds,
const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) = 0;
- // Returns a GrGpuTextureCommandBuffer which GrOpLists send texture commands to instead of
+ // Returns a GrGpuTextureCommandBuffer which GrOpsTasks send texture commands to instead of
// directly to the Gpu object.
virtual GrGpuTextureCommandBuffer* getCommandBuffer(GrTexture*, GrSurfaceOrigin) = 0;
diff --git a/src/gpu/GrLegacyDirectContext.cpp b/src/gpu/GrLegacyDirectContext.cpp
index ffd224d..dbdcd5c 100644
--- a/src/gpu/GrLegacyDirectContext.cpp
+++ b/src/gpu/GrLegacyDirectContext.cpp
@@ -28,9 +28,9 @@
#endif
#ifdef SK_DISABLE_REDUCE_OPLIST_SPLITTING
-static const bool kDefaultReduceOpListSplitting = false;
+static const bool kDefaultReduceOpsTaskSplitting = false;
#else
-static const bool kDefaultReduceOpListSplitting = false;
+static const bool kDefaultReduceOpsTaskSplitting = false;
#endif
class GrLegacyDirectContext : public GrContext {
@@ -82,14 +82,14 @@
return false;
}
- bool reduceOpListSplitting = kDefaultReduceOpListSplitting;
+ bool reduceOpsTaskSplitting = kDefaultReduceOpsTaskSplitting;
if (GrContextOptions::Enable::kNo == this->options().fReduceOpListSplitting) {
- reduceOpListSplitting = false;
+ reduceOpsTaskSplitting = false;
} else if (GrContextOptions::Enable::kYes == this->options().fReduceOpListSplitting) {
- reduceOpListSplitting = true;
+ reduceOpsTaskSplitting = true;
}
- this->setupDrawingManager(true, reduceOpListSplitting);
+ this->setupDrawingManager(true, reduceOpsTaskSplitting);
SkASSERT(this->caps());
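Note that the public GrContextOptions field keeps its pre-rename spelling, so clients opt in exactly as before; a hedged sketch (the GL factory call is shown for illustration only):

    GrContextOptions options;
    options.fReduceOpListSplitting = GrContextOptions::Enable::kYes;  // maps onto fReduceOpsTaskSplitting internally
    sk_sp<GrContext> context = GrContext::MakeGL(nullptr, options);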
diff --git a/src/gpu/GrOnFlushResourceProvider.h b/src/gpu/GrOnFlushResourceProvider.h
index cc36c10..f26f8d3 100644
--- a/src/gpu/GrOnFlushResourceProvider.h
+++ b/src/gpu/GrOnFlushResourceProvider.h
@@ -15,9 +15,7 @@
#include "src/gpu/GrResourceProvider.h"
class GrDrawingManager;
-class GrOpList;
class GrOnFlushResourceProvider;
-class GrRenderTargetOpList;
class GrRenderTargetContext;
class GrSurfaceProxy;
class SkColorSpace;
@@ -33,11 +31,11 @@
/*
* The onFlush callback allows subsystems (e.g., text, path renderers) to create atlases
- * for a specific flush. All the GrOpList IDs required for the flush are passed into the
+ * for a specific flush. All the GrOpsTask IDs required for the flush are passed into the
* callback. The callback should return the render target contexts used to render the atlases
* in 'results'.
*/
- virtual void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
+ virtual void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs, int numOpsTaskIDs,
SkTArray<std::unique_ptr<GrRenderTargetContext>>* results) = 0;
/**
@@ -45,7 +43,7 @@
* released. startTokenForNextFlush can be used to track resources used in the current flush.
*/
virtual void postFlush(GrDeferredUploadToken startTokenForNextFlush,
- const uint32_t* opListIDs, int numOpListIDs) {}
+ const uint32_t* opsTaskIDs, int numOpsTaskIDs) {}
/**
* Tells the callback owner to hold onto this object when freeing GPU resources
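A hedged sketch of a GrOnFlushCallbackObject implementation against the renamed parameters (AtlasUploader and its behavior are hypothetical; only the overridden signatures come from this header):

    class AtlasUploader : public GrOnFlushCallbackObject {
    public:
        void preFlush(GrOnFlushResourceProvider* provider, const uint32_t* opsTaskIDs,
                      int numOpsTaskIDs,
                      SkTArray<std::unique_ptr<GrRenderTargetContext>>* results) override {
            // Build any atlases the listed opsTasks need and append the render
            // target contexts that drew them to 'results'.
        }
        void postFlush(GrDeferredUploadToken startTokenForNextFlush,
                       const uint32_t* opsTaskIDs, int numOpsTaskIDs) override {
            // Release per-flush resources keyed by the same opsTask IDs.
        }
    };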
diff --git a/src/gpu/GrOpFlushState.h b/src/gpu/GrOpFlushState.h
index fa3778b..c9178e2 100644
--- a/src/gpu/GrOpFlushState.h
+++ b/src/gpu/GrOpFlushState.h
@@ -23,7 +23,7 @@
class GrGpuRTCommandBuffer;
class GrResourceProvider;
-/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpList flush. */
+/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpsTask flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
public:
// vertexSpace and indexSpace may either be null or an alloation of size
@@ -47,7 +47,7 @@
const GrUserStencilSettings* = &GrUserStencilSettings::kUnused);
GrGpuCommandBuffer* commandBuffer() { return fCommandBuffer; }
- // Helper function used by Ops that are only called via RenderTargetOpLists
+ // Helper function used by Ops that are only called via OpsTasks
GrGpuRTCommandBuffer* rtCommandBuffer();
void setCommandBuffer(GrGpuCommandBuffer* buffer) { fCommandBuffer = buffer; }
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
deleted file mode 100644
index 2035dd5..0000000
--- a/src/gpu/GrOpList.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright 2016 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "src/gpu/GrMemoryPool.h"
-#include "src/gpu/GrOpList.h"
-
-GrOpList::GrOpList(sk_sp<GrOpMemoryPool> opMemoryPool,
- sk_sp<GrSurfaceProxy> surfaceProxy,
- GrAuditTrail* auditTrail)
- : GrRenderTask(std::move(surfaceProxy))
- , fOpMemoryPool(std::move(opMemoryPool))
- , fAuditTrail(auditTrail) {
- SkASSERT(fOpMemoryPool);
-}
-
-GrOpList::~GrOpList() {
-}
-
-void GrOpList::endFlush() {
- if (fTarget && this == fTarget->getLastRenderTask()) {
- fTarget->setLastRenderTask(nullptr);
- }
-
- fTarget.reset();
- fDeferredProxies.reset();
- fAuditTrail = nullptr;
-}
diff --git a/src/gpu/GrOpList.h b/src/gpu/GrOpList.h
deleted file mode 100644
index a4ee09e..0000000
--- a/src/gpu/GrOpList.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2016 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrOpList_DEFINED
-#define GrOpList_DEFINED
-
-#include "include/core/SkRefCnt.h"
-#include "include/private/SkColorData.h"
-#include "include/private/SkTDArray.h"
-#include "src/gpu/GrRenderTask.h"
-#include "src/gpu/GrTextureProxy.h"
-
-class GrAuditTrail;
-class GrOpMemoryPool;
-class GrGpuBuffer;
-
-class GrOpList : public GrRenderTask {
-public:
- GrOpList(sk_sp<GrOpMemoryPool>, sk_sp<GrSurfaceProxy>, GrAuditTrail*);
- ~GrOpList() override;
-
- void endFlush() override;
-
-protected:
- // This is a backpointer to the GrOpMemoryPool that holds the memory for this opLists' ops.
- // In the DDL case, these back pointers keep the DDL's GrOpMemoryPool alive as long as its
- // constituent opLists survive.
- sk_sp<GrOpMemoryPool> fOpMemoryPool;
- GrAuditTrail* fAuditTrail;
-};
-
-#endif
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrOpsTask.cpp
similarity index 87%
rename from src/gpu/GrRenderTargetOpList.cpp
rename to src/gpu/GrOpsTask.cpp
index 2d7c2c9..bcf0034 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrOpsTask.cpp
@@ -1,11 +1,11 @@
/*
- * Copyright 2010 Google Inc.
+ * Copyright 2019 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
-#include "src/gpu/GrRenderTargetOpList.h"
+#include "src/gpu/GrOpsTask.h"
#include "include/private/GrRecordingContext.h"
#include "src/core/SkExchange.h"
@@ -40,15 +40,14 @@
////////////////////////////////////////////////////////////////////////////////
-inline GrRenderTargetOpList::OpChain::List::List(std::unique_ptr<GrOp> op)
+inline GrOpsTask::OpChain::List::List(std::unique_ptr<GrOp> op)
: fHead(std::move(op)), fTail(fHead.get()) {
this->validate();
}
-inline GrRenderTargetOpList::OpChain::List::List(List&& that) { *this = std::move(that); }
+inline GrOpsTask::OpChain::List::List(List&& that) { *this = std::move(that); }
-inline GrRenderTargetOpList::OpChain::List& GrRenderTargetOpList::OpChain::List::operator=(
- List&& that) {
+inline GrOpsTask::OpChain::List& GrOpsTask::OpChain::List::operator=(List&& that) {
fHead = std::move(that.fHead);
fTail = that.fTail;
that.fTail = nullptr;
@@ -56,7 +55,7 @@
return *this;
}
-inline std::unique_ptr<GrOp> GrRenderTargetOpList::OpChain::List::popHead() {
+inline std::unique_ptr<GrOp> GrOpsTask::OpChain::List::popHead() {
SkASSERT(fHead);
auto temp = fHead->cutChain();
std::swap(temp, fHead);
@@ -67,7 +66,7 @@
return temp;
}
-inline std::unique_ptr<GrOp> GrRenderTargetOpList::OpChain::List::removeOp(GrOp* op) {
+inline std::unique_ptr<GrOp> GrOpsTask::OpChain::List::removeOp(GrOp* op) {
#ifdef SK_DEBUG
auto head = op;
while (head->prevInChain()) { head = head->prevInChain(); }
@@ -89,7 +88,7 @@
return temp;
}
-inline void GrRenderTargetOpList::OpChain::List::pushHead(std::unique_ptr<GrOp> op) {
+inline void GrOpsTask::OpChain::List::pushHead(std::unique_ptr<GrOp> op) {
SkASSERT(op);
SkASSERT(op->isChainHead());
SkASSERT(op->isChainTail());
@@ -102,13 +101,13 @@
}
}
-inline void GrRenderTargetOpList::OpChain::List::pushTail(std::unique_ptr<GrOp> op) {
+inline void GrOpsTask::OpChain::List::pushTail(std::unique_ptr<GrOp> op) {
SkASSERT(op->isChainTail());
fTail->chainConcat(std::move(op));
fTail = fTail->nextInChain();
}
-inline void GrRenderTargetOpList::OpChain::List::validate() const {
+inline void GrOpsTask::OpChain::List::validate() const {
#ifdef SK_DEBUG
if (fHead) {
SkASSERT(fTail);
@@ -119,9 +118,9 @@
////////////////////////////////////////////////////////////////////////////////
-GrRenderTargetOpList::OpChain::OpChain(std::unique_ptr<GrOp> op,
- GrProcessorSet::Analysis processorAnalysis,
- GrAppliedClip* appliedClip, const DstProxy* dstProxy)
+GrOpsTask::OpChain::OpChain(std::unique_ptr<GrOp> op,
+ GrProcessorSet::Analysis processorAnalysis,
+ GrAppliedClip* appliedClip, const DstProxy* dstProxy)
: fList{std::move(op)}
, fProcessorAnalysis(processorAnalysis)
, fAppliedClip(appliedClip) {
@@ -132,7 +131,7 @@
fBounds = fList.head()->bounds();
}
-void GrRenderTargetOpList::OpChain::visitProxies(const GrOp::VisitProxyFunc& func) const {
+void GrOpsTask::OpChain::visitProxies(const GrOp::VisitProxyFunc& func) const {
if (fList.empty()) {
return;
}
@@ -147,7 +146,7 @@
}
}
-void GrRenderTargetOpList::OpChain::deleteOps(GrOpMemoryPool* pool) {
+void GrOpsTask::OpChain::deleteOps(GrOpMemoryPool* pool) {
while (!fList.empty()) {
pool->release(fList.popHead());
}
@@ -155,7 +154,7 @@
// Concatenates two op chains and attempts to merge ops across the chains. Assumes that we know that
// the two chains are chainable. Returns the new chain.
-GrRenderTargetOpList::OpChain::List GrRenderTargetOpList::OpChain::DoConcat(
+GrOpsTask::OpChain::List GrOpsTask::OpChain::DoConcat(
List chainA, List chainB, const GrCaps& caps, GrOpMemoryPool* pool,
GrAuditTrail* auditTrail) {
// We process ops in chain b from head to tail. We attempt to merge with nodes in a, starting
@@ -231,7 +230,7 @@
// Attempts to concatenate the given chain onto our own and merge ops across the chains. Returns
// whether the operation succeeded. On success, the provided list will be returned empty.
-bool GrRenderTargetOpList::OpChain::tryConcat(
+bool GrOpsTask::OpChain::tryConcat(
List* list, GrProcessorSet::Analysis processorAnalysis, const DstProxy& dstProxy,
const GrAppliedClip* appliedClip, const SkRect& bounds, const GrCaps& caps,
GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
@@ -289,8 +288,8 @@
return true;
}
-bool GrRenderTargetOpList::OpChain::prependChain(OpChain* that, const GrCaps& caps,
- GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
+bool GrOpsTask::OpChain::prependChain(OpChain* that, const GrCaps& caps, GrOpMemoryPool* pool,
+ GrAuditTrail* auditTrail) {
if (!that->tryConcat(
&fList, fProcessorAnalysis, fDstProxy, fAppliedClip, fBounds, caps, pool, auditTrail)) {
this->validate();
@@ -313,7 +312,7 @@
return true;
}
-std::unique_ptr<GrOp> GrRenderTargetOpList::OpChain::appendOp(
+std::unique_ptr<GrOp> GrOpsTask::OpChain::appendOp(
std::unique_ptr<GrOp> op, GrProcessorSet::Analysis processorAnalysis,
const DstProxy* dstProxy, const GrAppliedClip* appliedClip, const GrCaps& caps,
GrOpMemoryPool* pool, GrAuditTrail* auditTrail) {
@@ -336,7 +335,7 @@
return nullptr;
}
-inline void GrRenderTargetOpList::OpChain::validate() const {
+inline void GrOpsTask::OpChain::validate() const {
#ifdef SK_DEBUG
fList.validate();
for (const auto& op : GrOp::ChainRange<>(fList.head())) {
@@ -349,71 +348,46 @@
////////////////////////////////////////////////////////////////////////////////
-GrRenderTargetOpList::GrRenderTargetOpList(sk_sp<GrOpMemoryPool> opMemoryPool,
- sk_sp<GrRenderTargetProxy> proxy,
- GrAuditTrail* auditTrail)
- : INHERITED(std::move(opMemoryPool), std::move(proxy), auditTrail)
+GrOpsTask::GrOpsTask(sk_sp<GrOpMemoryPool> opMemoryPool,
+ sk_sp<GrRenderTargetProxy> rtProxy,
+ GrAuditTrail* auditTrail)
+ : GrRenderTask(std::move(rtProxy))
+ , fOpMemoryPool(std::move(opMemoryPool))
+ , fAuditTrail(auditTrail)
, fLastClipStackGenID(SK_InvalidUniqueID)
SkDEBUGCODE(, fNumClips(0)) {
+ SkASSERT(fOpMemoryPool);
fTarget->setLastRenderTask(this);
}
-void GrRenderTargetOpList::deleteOps() {
+void GrOpsTask::deleteOps() {
for (auto& chain : fOpChains) {
chain.deleteOps(fOpMemoryPool.get());
}
fOpChains.reset();
}
-GrRenderTargetOpList::~GrRenderTargetOpList() {
+GrOpsTask::~GrOpsTask() {
this->deleteOps();
}
////////////////////////////////////////////////////////////////////////////////
-#ifdef SK_DEBUG
-static const char* load_op_to_name(GrLoadOp op) {
- return GrLoadOp::kLoad == op ? "load" : GrLoadOp::kClear == op ? "clear" : "discard";
-}
+void GrOpsTask::endFlush() {
+ fLastClipStackGenID = SK_InvalidUniqueID;
+ this->deleteOps();
+ fClipAllocator.reset();
-void GrRenderTargetOpList::dump(bool printDependencies) const {
- INHERITED::dump(printDependencies);
-
- SkDebugf("ColorLoadOp: %s %x StencilLoadOp: %s\n",
- load_op_to_name(fColorLoadOp),
- GrLoadOp::kClear == fColorLoadOp ? fLoadClearColor.toBytes_RGBA() : 0x0,
- load_op_to_name(fStencilLoadOp));
-
- SkDebugf("ops (%d):\n", fOpChains.count());
- for (int i = 0; i < fOpChains.count(); ++i) {
- SkDebugf("*******************************\n");
- if (!fOpChains[i].head()) {
- SkDebugf("%d: <combined forward or failed instantiation>\n", i);
- } else {
- SkDebugf("%d: %s\n", i, fOpChains[i].head()->name());
- SkRect bounds = fOpChains[i].bounds();
- SkDebugf("ClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
- bounds.fTop, bounds.fRight, bounds.fBottom);
- for (const auto& op : GrOp::ChainRange<>(fOpChains[i].head())) {
- SkString info = SkTabString(op.dumpInfo(), 1);
- SkDebugf("%s\n", info.c_str());
- bounds = op.bounds();
- SkDebugf("\tClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
- bounds.fTop, bounds.fRight, bounds.fBottom);
- }
- }
+ if (fTarget && this == fTarget->getLastRenderTask()) {
+ fTarget->setLastRenderTask(nullptr);
}
+
+ fTarget.reset();
+ fDeferredProxies.reset();
+ fAuditTrail = nullptr;
}
-void GrRenderTargetOpList::visitProxies_debugOnly(const GrOp::VisitProxyFunc& func) const {
- for (const OpChain& chain : fOpChains) {
- chain.visitProxies(func);
- }
-}
-
-#endif
-
-void GrRenderTargetOpList::onPrepare(GrOpFlushState* flushState) {
+void GrOpsTask::onPrepare(GrOpFlushState* flushState) {
SkASSERT(fTarget->peekRenderTarget());
SkASSERT(this->isClosed());
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
@@ -455,7 +429,7 @@
// TODO:
// We would like to (at this level) only ever clear & discard. We would need
- // to stop splitting up higher level opLists for copyOps to achieve that.
+ // to stop splitting up higher level OpsTasks for copyOps to achieve that.
// Note: we would still need SB loads and stores but they would happen at a
// lower level (inside the VK command buffer).
const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo stencilLoadAndStoreInfo {
@@ -469,7 +443,7 @@
// TODO: this is where GrOp::renderTarget is used (which is fine since it
// is at flush time). However, we need to store the RenderTargetProxy in the
// Ops and instantiate them here.
-bool GrRenderTargetOpList::onExecute(GrOpFlushState* flushState) {
+bool GrOpsTask::onExecute(GrOpFlushState* flushState) {
if (this->isNoOp()) {
return false;
}
@@ -525,31 +499,15 @@
return true;
}
-void GrRenderTargetOpList::endFlush() {
- fLastClipStackGenID = SK_InvalidUniqueID;
- this->deleteOps();
- fClipAllocator.reset();
- INHERITED::endFlush();
-}
-
-void GrRenderTargetOpList::discard() {
- // Discard calls to in-progress opLists are ignored. Calls at the start update the
- // opLists' color & stencil load ops.
- if (this->isEmpty()) {
- fColorLoadOp = GrLoadOp::kDiscard;
- fStencilLoadOp = GrLoadOp::kDiscard;
- }
-}
-
-void GrRenderTargetOpList::setColorLoadOp(GrLoadOp op, const SkPMColor4f& color) {
+void GrOpsTask::setColorLoadOp(GrLoadOp op, const SkPMColor4f& color) {
fColorLoadOp = op;
fLoadClearColor = color;
}
-bool GrRenderTargetOpList::resetForFullscreenClear(CanDiscardPreviousOps canDiscardPreviousOps) {
+bool GrOpsTask::resetForFullscreenClear(CanDiscardPreviousOps canDiscardPreviousOps) {
// Mark the color load op as discard (this may be followed by a clearColorOnLoad call to make
// the load op kClear, or it may be followed by an explicit op). In the event of an absClear()
- // after a regular clear(), we could end up with a clear load op and a real clear op in the list
+ // after a regular clear(), we could end up with a clear load op and a real clear op in the task
// if the load op were not reset here.
fColorLoadOp = GrLoadOp::kDiscard;
@@ -563,19 +521,87 @@
this->deleteOps();
fDeferredProxies.reset();
- // If the opList is using a render target which wraps a vulkan command buffer, we can't do a
- // clear load since we cannot change the render pass that we are using. Thus we fall back to
- // making a clear op in this case.
+ // If the opsTask is using a render target which wraps a vulkan command buffer, we can't do
+ // a clear load since we cannot change the render pass that we are using. Thus we fall back
+ // to making a clear op in this case.
return !fTarget->asRenderTargetProxy()->wrapsVkSecondaryCB();
}
- // Could not empty the list, so an op must be added to handle the clear
+ // Could not empty the task, so an op must be added to handle the clear
return false;
}
+void GrOpsTask::discard() {
+ // Discard calls to in-progress opsTasks are ignored. Calls at the start update the
+ // opsTasks' color & stencil load ops.
+ if (this->isEmpty()) {
+ fColorLoadOp = GrLoadOp::kDiscard;
+ fStencilLoadOp = GrLoadOp::kDiscard;
+ }
+}
+
////////////////////////////////////////////////////////////////////////////////
-void GrRenderTargetOpList::handleInternalAllocationFailure() {
+#ifdef SK_DEBUG
+static const char* load_op_to_name(GrLoadOp op) {
+ return GrLoadOp::kLoad == op ? "load" : GrLoadOp::kClear == op ? "clear" : "discard";
+}
+
+void GrOpsTask::dump(bool printDependencies) const {
+ GrRenderTask::dump(printDependencies);
+
+ SkDebugf("ColorLoadOp: %s %x StencilLoadOp: %s\n",
+ load_op_to_name(fColorLoadOp),
+ GrLoadOp::kClear == fColorLoadOp ? fLoadClearColor.toBytes_RGBA() : 0x0,
+ load_op_to_name(fStencilLoadOp));
+
+ SkDebugf("ops (%d):\n", fOpChains.count());
+ for (int i = 0; i < fOpChains.count(); ++i) {
+ SkDebugf("*******************************\n");
+ if (!fOpChains[i].head()) {
+ SkDebugf("%d: <combined forward or failed instantiation>\n", i);
+ } else {
+ SkDebugf("%d: %s\n", i, fOpChains[i].head()->name());
+ SkRect bounds = fOpChains[i].bounds();
+ SkDebugf("ClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
+ bounds.fTop, bounds.fRight, bounds.fBottom);
+ for (const auto& op : GrOp::ChainRange<>(fOpChains[i].head())) {
+ SkString info = SkTabString(op.dumpInfo(), 1);
+ SkDebugf("%s\n", info.c_str());
+ bounds = op.bounds();
+ SkDebugf("\tClippedBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n", bounds.fLeft,
+ bounds.fTop, bounds.fRight, bounds.fBottom);
+ }
+ }
+ }
+}
+
+void GrOpsTask::visitProxies_debugOnly(const GrOp::VisitProxyFunc& func) const {
+ for (const OpChain& chain : fOpChains) {
+ chain.visitProxies(func);
+ }
+}
+
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool GrOpsTask::onIsUsed(GrSurfaceProxy* proxyToCheck) const {
+ bool used = false;
+
+ auto visit = [ proxyToCheck, &used ] (GrSurfaceProxy* p, GrMipMapped) {
+ if (p == proxyToCheck) {
+ used = true;
+ }
+ };
+ for (const OpChain& recordedOp : fOpChains) {
+ recordedOp.visitProxies(visit);
+ }
+
+ return used;
+}
+
+void GrOpsTask::handleInternalAllocationFailure() {
bool hasUninstantiatedProxy = false;
auto checkInstantiation = [&hasUninstantiatedProxy](GrSurfaceProxy* p, GrMipMapped) {
if (!p->isInstantiated()) {
@@ -592,23 +618,7 @@
}
}
-bool GrRenderTargetOpList::onIsUsed(GrSurfaceProxy* proxyToCheck) const {
- bool used = false;
-
- auto visit = [ proxyToCheck, &used ] (GrSurfaceProxy* p, GrMipMapped) {
- if (p == proxyToCheck) {
- used = true;
- }
- };
- for (const OpChain& recordedOp : fOpChains) {
- recordedOp.visitProxies(visit);
- }
-
- return used;
-}
-
-void GrRenderTargetOpList::gatherProxyIntervals(GrResourceAllocator* alloc) const {
-
+void GrOpsTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
for (int i = 0; i < fDeferredProxies.count(); ++i) {
SkASSERT(!fDeferredProxies[i]->isInstantiated());
// We give all the deferred proxies a write usage at the very start of flushing. This
@@ -619,7 +629,7 @@
alloc->addInterval(fDeferredProxies[i], 0, 0, GrResourceAllocator::ActualUse::kNo);
}
- // Add the interval for all the writes to this opList's target
+ // Add the interval for all the writes to this GrOpsTask's target
if (fOpChains.count()) {
unsigned int cur = alloc->curOp();
@@ -647,14 +657,14 @@
}
}
-void GrRenderTargetOpList::recordOp(
+void GrOpsTask::recordOp(
std::unique_ptr<GrOp> op, GrProcessorSet::Analysis processorAnalysis, GrAppliedClip* clip,
const DstProxy* dstProxy, const GrCaps& caps) {
SkDEBUGCODE(op->validate();)
SkASSERT(processorAnalysis.requiresDstTexture() == (dstProxy && dstProxy->proxy()));
SkASSERT(fTarget);
- // A closed GrOpList should never receive new/more ops
+ // A closed GrOpsTask should never receive new/more ops
SkASSERT(!this->isClosed());
if (!op->bounds().isFinite()) {
fOpMemoryPool->release(std::move(op));
@@ -666,7 +676,7 @@
// 2) intersect with something
// 3) find a 'blocker'
GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), fTarget->uniqueID());
- GrOP_INFO("opList: %d Recording (%s, opID: %u)\n"
+ GrOP_INFO("opsTask: %d Recording (%s, opID: %u)\n"
"\tBounds [L: %.2f, T: %.2f R: %.2f B: %.2f]\n",
this->uniqueID(),
op->name(),
@@ -706,9 +716,9 @@
fOpChains.emplace_back(std::move(op), processorAnalysis, clip, dstProxy);
}
-void GrRenderTargetOpList::forwardCombine(const GrCaps& caps) {
+void GrOpsTask::forwardCombine(const GrCaps& caps) {
SkASSERT(!this->isClosed());
- GrOP_INFO("opList: %d ForwardCombine %d ops:\n", this->uniqueID(), fOpChains.count());
+ GrOP_INFO("opsTask: %d ForwardCombine %d ops:\n", this->uniqueID(), fOpChains.count());
for (int i = 0; i < fOpChains.count() - 1; ++i) {
OpChain& chain = fOpChains[i];
diff --git a/src/gpu/GrRenderTargetOpList.h b/src/gpu/GrOpsTask.h
similarity index 86%
rename from src/gpu/GrRenderTargetOpList.h
rename to src/gpu/GrOpsTask.h
index fc675e3..a89b22c 100644
--- a/src/gpu/GrRenderTargetOpList.h
+++ b/src/gpu/GrOpsTask.h
@@ -1,41 +1,47 @@
/*
- * Copyright 2010 Google Inc.
+ * Copyright 2019 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
-#ifndef GrRenderTargetOpList_DEFINED
-#define GrRenderTargetOpList_DEFINED
+#ifndef GrOpsTask_DEFINED
+#define GrOpsTask_DEFINED
#include "include/core/SkMatrix.h"
+#include "include/core/SkRefCnt.h"
#include "include/core/SkStrokeRec.h"
#include "include/core/SkTypes.h"
+#include "include/private/SkColorData.h"
#include "include/private/SkTArray.h"
+#include "include/private/SkTDArray.h"
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkClipStack.h"
#include "src/core/SkStringUtils.h"
#include "src/core/SkTLazy.h"
#include "src/gpu/GrAppliedClip.h"
-#include "src/gpu/GrOpList.h"
#include "src/gpu/GrPathRendering.h"
#include "src/gpu/GrPrimitiveProcessor.h"
+#include "src/gpu/GrRenderTask.h"
#include "src/gpu/ops/GrDrawOp.h"
#include "src/gpu/ops/GrOp.h"
class GrAuditTrail;
-class GrClearOp;
class GrCaps;
+class GrClearOp;
+class GrGpuBuffer;
+class GrOpMemoryPool;
class GrRenderTargetProxy;
-class GrRenderTargetOpList final : public GrOpList {
+class GrOpsTask : public GrRenderTask {
private:
using DstProxy = GrXferProcessor::DstProxy;
public:
- GrRenderTargetOpList(sk_sp<GrOpMemoryPool>, sk_sp<GrRenderTargetProxy>, GrAuditTrail*);
+ GrOpsTask(sk_sp<GrOpMemoryPool>, sk_sp<GrRenderTargetProxy>, GrAuditTrail*);
+ ~GrOpsTask() override;
- ~GrRenderTargetOpList() override;
+ GrOpsTask* asOpsTask() override { return this; }
bool isEmpty() const { return fOpChains.empty(); }
@@ -89,23 +95,13 @@
void discard();
- GrRenderTargetOpList* asRenderTargetOpList() override { return this; }
-
SkDEBUGCODE(void dump(bool printDependencies) const override;)
SkDEBUGCODE(int numClips() const override { return fNumClips; })
SkDEBUGCODE(void visitProxies_debugOnly(const GrOp::VisitProxyFunc&) const;)
private:
- friend class GrRenderTargetContextPriv; // for stencil clip state. TODO: this is invasive
-
- // The RTC and RTOpList have to work together to handle buffer clears. In most cases, buffer
- // clearing can be done natively, in which case the op list's load ops are sufficient. In other
- // cases, draw ops must be used, which makes the RTC the best place for those decisions. This,
- // however, requires that the RTC be able to coordinate with the op list to achieve similar ends
- friend class GrRenderTargetContext;
-
bool isNoOp() const {
- // TODO: GrLoadOp::kDiscard -> [empty opList] -> GrStoreOp::kStore should also be a no-op.
+ // TODO: GrLoadOp::kDiscard -> [empty OpsTask] -> GrStoreOp::kStore should also be a no-op.
// We don't count it as a no-op right now because of Vulkan. There are real cases where we
// store a discard, and if we skip that render pass, then the next time we load the render
// target, Vulkan detects loading of uninitialized memory and complains. If we don't skip
@@ -118,7 +114,7 @@
GrLoadOp::kDiscard != fColorLoadOp;
}
- bool onIsUsed(GrSurfaceProxy*) const override;
+ void deleteOps();
// Must only be called if native stencil buffer clearing is enabled
void setStencilLoadOp(GrLoadOp op) { fStencilLoadOp = op; }
@@ -140,8 +136,6 @@
// Returns true if the clear can be converted into a load op (barring device caps).
bool resetForFullscreenClear(CanDiscardPreviousOps);
- void deleteOps();
-
class OpChain {
public:
OpChain(const OpChain&) = delete;
@@ -213,6 +207,9 @@
SkRect fBounds;
};
+
+ bool onIsUsed(GrSurfaceProxy*) const override;
+
void handleInternalAllocationFailure() override;
void gatherProxyIntervals(GrResourceAllocator*) const override;
@@ -227,26 +224,39 @@
return (this->isNoOp()) ? ExpectedOutcome::kTargetUnchanged : ExpectedOutcome::kTargetDirty;
}
- GrLoadOp fColorLoadOp = GrLoadOp::kLoad;
- SkPMColor4f fLoadClearColor = SK_PMColor4fTRANSPARENT;
- GrLoadOp fStencilLoadOp = GrLoadOp::kLoad;
+ friend class GrRenderTargetContextPriv; // for stencil clip state. TODO: this is invasive
- uint32_t fLastClipStackGenID;
- SkIRect fLastDevClipBounds;
- int fLastClipNumAnalyticFPs;
+ // The RTC and OpsTask have to work together to handle buffer clears. In most cases, buffer
+ // clearing can be done natively, in which case the op list's load ops are sufficient. In other
+ // cases, draw ops must be used, which makes the RTC the best place for those decisions. This,
+ // however, requires that the RTC be able to coordinate with the op list to achieve similar ends
+ friend class GrRenderTargetContext;
+
+ // This is a backpointer to the GrOpMemoryPool that holds the memory for this GrOpsTask's ops.
+ // In the DDL case, these back pointers keep the DDL's GrOpMemoryPool alive as long as its
+ // constituent GrOpsTask survives.
+ sk_sp<GrOpMemoryPool> fOpMemoryPool;
+ GrAuditTrail* fAuditTrail;
+
+ GrLoadOp fColorLoadOp = GrLoadOp::kLoad;
+ SkPMColor4f fLoadClearColor = SK_PMColor4fTRANSPARENT;
+ GrLoadOp fStencilLoadOp = GrLoadOp::kLoad;
+
+ uint32_t fLastClipStackGenID;
+ SkIRect fLastDevClipBounds;
+ int fLastClipNumAnalyticFPs;
// We must track if we have a wait op so that we don't delete the op when we have a full clear.
    bool fHasWaitOp = false;
- // For ops/opList we have mean: 5 stdDev: 28
- SkSTArray<25, OpChain, true> fOpChains;
+ // For ops/opsTask we have mean: 5 stdDev: 28
+ SkSTArray<25, OpChain, true> fOpChains;
// MDB TODO: 4096 for the first allocation of the clip space will be huge overkill.
// Gather statistics to determine the correct size.
- SkArenaAlloc fClipAllocator{4096};
- SkDEBUGCODE(int fNumClips;)
+ SkArenaAlloc fClipAllocator{4096};
+ SkDEBUGCODE(int fNumClips;)
- typedef GrOpList INHERITED;
};
#endif
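The fOpMemoryPool back-pointer added above is what keeps op memory alive in the DDL case: every op recorded into a GrOpsTask lives in that pool, so the task must hold a ref on the pool for as long as the task survives. A minimal standalone sketch of that ownership pattern (Pool and OpsTaskSketch are illustrative stand-ins, not the Skia classes):

    #include <memory>
    #include <vector>

    // Sketch of the back-pointer described in the fOpMemoryPool comment: each
    // task keeps a ref on the pool its ops were allocated from, so the pool
    // cannot be freed while any task still points into it.
    struct Pool {
        std::vector<std::unique_ptr<int>> fOps;  // storage for "ops"
        int* allocateOp(int value) {
            fOps.push_back(std::make_unique<int>(value));
            return fOps.back().get();
        }
    };

    struct OpsTaskSketch {
        explicit OpsTaskSketch(std::shared_ptr<Pool> pool) : fOpMemoryPool(std::move(pool)) {}
        void record(int value) { fOpChains.push_back(fOpMemoryPool->allocateOp(value)); }

        // Keeping this ref is what keeps the recorded ops' memory alive, even
        // after the recorder (e.g. a DDL) that created the pool is destroyed.
        std::shared_ptr<Pool> fOpMemoryPool;
        std::vector<int*> fOpChains;  // raw pointers into the pool
    };

The sk_sp in the real header plays the role of shared_ptr here; the raw op pointers stay valid only because the task shares ownership of the pool.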
diff --git a/src/gpu/GrPathRenderer.h b/src/gpu/GrPathRenderer.h
index 26f7c7f..a1d0741 100644
--- a/src/gpu/GrPathRenderer.h
+++ b/src/gpu/GrPathRenderer.h
@@ -28,7 +28,7 @@
class SkPath;
/**
- * Base class for drawing paths into a GrOpList.
+ * Base class for drawing paths into a GrOpsTask.
*/
class GrPathRenderer : public SkRefCnt {
public:
diff --git a/src/gpu/GrPipeline.cpp b/src/gpu/GrPipeline.cpp
index 1de2199..3e5e18d 100644
--- a/src/gpu/GrPipeline.cpp
+++ b/src/gpu/GrPipeline.cpp
@@ -11,7 +11,6 @@
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrRenderTargetContext.h"
-#include "src/gpu/GrRenderTargetOpList.h"
#include "src/gpu/GrXferProcessor.h"
#include "src/gpu/ops/GrOp.h"
diff --git a/src/gpu/GrRecordingContext.cpp b/src/gpu/GrRecordingContext.cpp
index 58544ed..9043f6a 100644
--- a/src/gpu/GrRecordingContext.cpp
+++ b/src/gpu/GrRecordingContext.cpp
@@ -66,7 +66,7 @@
return true;
}
-void GrRecordingContext::setupDrawingManager(bool sortOpLists, bool reduceOpListSplitting) {
+void GrRecordingContext::setupDrawingManager(bool sortOpsTasks, bool reduceOpsTaskSplitting) {
GrPathRendererChain::Options prcOptions;
prcOptions.fAllowPathMaskCaching = this->options().fAllowPathMaskCaching;
#if GR_TEST_UTILS
@@ -83,7 +83,7 @@
if (!this->proxyProvider()->renderingDirectly()) {
// DDL TODO: remove this crippling of the path renderer chain
// Disable the small path renderer bc of the proxies in the atlas. They need to be
- // unified when the opLists are added back to the destination drawing manager.
+ // unified when the opsTasks are added back to the destination drawing manager.
prcOptions.fGpuPathRenderers &= ~GpuPathRenderers::kSmall;
}
@@ -100,8 +100,8 @@
fDrawingManager.reset(new GrDrawingManager(this,
prcOptions,
textContextOptions,
- sortOpLists,
- reduceOpListSplitting));
+ sortOpsTasks,
+ reduceOpsTaskSplitting));
}
void GrRecordingContext::abandonContext() {
diff --git a/src/gpu/GrReducedClip.cpp b/src/gpu/GrReducedClip.cpp
index e485992..9224e00 100644
--- a/src/gpu/GrReducedClip.cpp
+++ b/src/gpu/GrReducedClip.cpp
@@ -656,7 +656,7 @@
if (fCCPRClipPaths.count() < fMaxCCPRClipPaths && GrAA::kYes == aa) {
// Set aside CCPR paths for later. We will create their clip FPs once we know the ID of the
- // opList they will operate in.
+ // opsTask they will operate in.
SkPath& ccprClipPath = fCCPRClipPaths.push_back(deviceSpacePath);
if (Invert::kYes == invert) {
ccprClipPath.toggleInverseFillType();
@@ -972,7 +972,7 @@
}
std::unique_ptr<GrFragmentProcessor> GrReducedClip::finishAndDetachAnalyticFPs(
- GrCoverageCountingPathRenderer* ccpr, uint32_t opListID) {
+ GrCoverageCountingPathRenderer* ccpr, uint32_t opsTaskID) {
// Make sure finishAndDetachAnalyticFPs hasn't been called already.
SkDEBUGCODE(for (const auto& fp : fAnalyticFPs) { SkASSERT(fp); })
@@ -981,7 +981,7 @@
for (const SkPath& ccprClipPath : fCCPRClipPaths) {
SkASSERT(ccpr);
SkASSERT(fHasScissor);
- auto fp = ccpr->makeClipProcessor(opListID, ccprClipPath, fScissor, *fCaps);
+ auto fp = ccpr->makeClipProcessor(opsTaskID, ccprClipPath, fScissor, *fCaps);
fAnalyticFPs.push_back(std::move(fp));
}
fCCPRClipPaths.reset();
diff --git a/src/gpu/GrReducedClip.h b/src/gpu/GrReducedClip.h
index 25b05df..ca95bd0 100644
--- a/src/gpu/GrReducedClip.h
+++ b/src/gpu/GrReducedClip.h
@@ -88,16 +88,16 @@
int numAnalyticFPs() const { return fAnalyticFPs.count() + fCCPRClipPaths.count(); }
/**
- * Called once the client knows the ID of the opList that the clip FPs will operate in. This
- * method finishes any outstanding work that was waiting for the opList ID, then detaches and
+ * Called once the client knows the ID of the opsTask that the clip FPs will operate in. This
+ * method finishes any outstanding work that was waiting for the opsTask ID, then detaches and
* returns this class's list of FPs that complete the clip.
*
* NOTE: this must be called AFTER producing the clip mask (if any) because draw calls on
* the render target context, surface allocations, and even switching render targets (pre MDB)
- * may cause flushes or otherwise change which opList the actual draw is going into.
+ * may cause flushes or otherwise change which opsTask the actual draw is going into.
*/
std::unique_ptr<GrFragmentProcessor> finishAndDetachAnalyticFPs(
- GrCoverageCountingPathRenderer*, uint32_t opListID);
+ GrCoverageCountingPathRenderer*, uint32_t opsTaskID);
private:
void walkStack(const SkClipStack&, const SkRect& queryBounds);
@@ -145,7 +145,7 @@
uint32_t fMaskGenID;
bool fMaskRequiresAA;
SkSTArray<4, std::unique_ptr<GrFragmentProcessor>> fAnalyticFPs;
- SkSTArray<4, SkPath> fCCPRClipPaths; // Will convert to FPs once we have an opList ID for CCPR.
+ SkSTArray<4, SkPath> fCCPRClipPaths; // Will convert to FPs once we have an opsTask ID for CCPR.
};
#endif
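As the comments above describe, CCPR clip paths are set aside in fCCPRClipPaths until the opsTask ID is known, and only then converted into clip FPs by finishAndDetachAnalyticFPs(). A minimal sketch of that deferred two-phase conversion, using placeholder Path and ClipProcessor types rather than the real GrFragmentProcessor machinery:

    #include <cstdint>
    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    // Sketch of the two-phase pattern: stash paths first, convert them all at
    // once when the opsTask ID becomes available. Types are placeholders.
    struct Path { std::string name; };
    struct ClipProcessor { uint32_t opsTaskID; Path path; };

    class ReducedClipSketch {
    public:
        // Phase 1: while walking the clip stack, set paths aside for CCPR.
        void addCCPRClipPath(Path path) { fCCPRClipPaths.push_back(std::move(path)); }

        // Phase 2: once the draw's opsTask ID is known, turn every stashed
        // path into a processor and hand the whole list back to the caller.
        std::vector<std::unique_ptr<ClipProcessor>> finishAndDetach(uint32_t opsTaskID) {
            std::vector<std::unique_ptr<ClipProcessor>> fps;
            for (Path& p : fCCPRClipPaths) {
                fps.push_back(std::unique_ptr<ClipProcessor>(
                        new ClipProcessor{opsTaskID, std::move(p)}));
            }
            fCCPRClipPaths.clear();  // may only be called once
            return fps;
        }

    private:
        std::vector<Path> fCCPRClipPaths;
    };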
diff --git a/src/gpu/GrRenderTarget.cpp b/src/gpu/GrRenderTarget.cpp
index 2942427..9f915e5 100644
--- a/src/gpu/GrRenderTarget.cpp
+++ b/src/gpu/GrRenderTarget.cpp
@@ -13,7 +13,6 @@
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrRenderTargetContext.h"
-#include "src/gpu/GrRenderTargetOpList.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrSamplePatternDictionary.h"
#include "src/gpu/GrStencilAttachment.h"
diff --git a/src/gpu/GrRenderTarget.h b/src/gpu/GrRenderTarget.h
index 7a6f44c..4e59422 100644
--- a/src/gpu/GrRenderTarget.h
+++ b/src/gpu/GrRenderTarget.h
@@ -12,7 +12,6 @@
#include "include/gpu/GrSurface.h"
class GrCaps;
-class GrRenderTargetOpList;
class GrRenderTargetPriv;
class GrStencilAttachment;
class GrBackendRenderTarget;
diff --git a/src/gpu/GrRenderTargetContext.cpp b/src/gpu/GrRenderTargetContext.cpp
index ff9cf20..862b79c 100644
--- a/src/gpu/GrRenderTargetContext.cpp
+++ b/src/gpu/GrRenderTargetContext.cpp
@@ -29,7 +29,6 @@
#include "src/gpu/GrFixedClip.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrMemoryPool.h"
-#include "src/gpu/GrOpList.h"
#include "src/gpu/GrPathRenderer.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTarget.h"
@@ -136,21 +135,21 @@
GrDrawingManager* fDrawingManager;
};
-// In MDB mode the reffing of the 'getLastOpList' call's result allows in-progress
-// GrOpLists to be picked up and added to by renderTargetContexts lower in the call
-// stack. When this occurs with a closed GrOpList, a new one will be allocated
-// when the renderTargetContext attempts to use it (via getOpList).
+// In MDB mode the reffing of the 'getLastOpsTask' call's result allows in-progress
+// GrOpsTasks to be picked up and added to by renderTargetContexts lower in the call
+// stack. When this occurs with a closed GrOpsTask, a new one will be allocated
+// when the renderTargetContext attempts to use it (via getOpsTask).
GrRenderTargetContext::GrRenderTargetContext(GrRecordingContext* context,
sk_sp<GrRenderTargetProxy> rtp,
GrColorType colorType,
sk_sp<SkColorSpace> colorSpace,
const SkSurfaceProps* surfaceProps,
- bool managedOpList)
+ bool managedOpsTask)
: GrSurfaceContext(context, colorType, kPremul_SkAlphaType, std::move(colorSpace))
, fRenderTargetProxy(std::move(rtp))
- , fOpList(sk_ref_sp(fRenderTargetProxy->getLastRenderTargetOpList()))
+ , fOpsTask(sk_ref_sp(fRenderTargetProxy->getLastOpsTask()))
, fSurfaceProps(SkSurfacePropsCopyOrDefault(surfaceProps))
- , fManagedOpList(managedOpList) {
+ , fManagedOpsTask(managedOpsTask) {
fTextTarget.reset(new TextTarget(this));
SkDEBUGCODE(this->validate();)
}
@@ -160,8 +159,8 @@
SkASSERT(fRenderTargetProxy);
fRenderTargetProxy->validate(fContext);
- if (fOpList && !fOpList->isClosed()) {
- SkASSERT(fRenderTargetProxy->getLastRenderTask() == fOpList.get());
+ if (fOpsTask && !fOpsTask->isClosed()) {
+ SkASSERT(fRenderTargetProxy->getLastRenderTask() == fOpsTask.get());
}
}
#endif
@@ -201,19 +200,15 @@
return GrMipMapped::kNo;
}
-GrRenderTargetOpList* GrRenderTargetContext::getRTOpList() {
+GrOpsTask* GrRenderTargetContext::getOpsTask() {
ASSERT_SINGLE_OWNER
SkDEBUGCODE(this->validate();)
- if (!fOpList || fOpList->isClosed()) {
- fOpList = this->drawingManager()->newRTOpList(fRenderTargetProxy, fManagedOpList);
+ if (!fOpsTask || fOpsTask->isClosed()) {
+ fOpsTask = this->drawingManager()->newOpsTask(fRenderTargetProxy, fManagedOpsTask);
}
- return fOpList.get();
-}
-
-GrOpList* GrRenderTargetContext::getOpList() {
- return this->getRTOpList();
+ return fOpsTask.get();
}
void GrRenderTargetContext::drawGlyphRunList(
@@ -244,7 +239,7 @@
AutoCheckFlush acf(this->drawingManager());
- this->getRTOpList()->discard();
+ this->getOpsTask()->discard();
}
void GrRenderTargetContext::clear(const SkIRect* rect,
@@ -300,16 +295,16 @@
}
if (isFull) {
- GrRenderTargetOpList* opList = this->getRTOpList();
- if (opList->resetForFullscreenClear(this->canDiscardPreviousOpsOnFullClear()) &&
+ GrOpsTask* opsTask = this->getOpsTask();
+ if (opsTask->resetForFullscreenClear(this->canDiscardPreviousOpsOnFullClear()) &&
!this->caps()->performColorClearsAsDraws()) {
// The op list was emptied and native clears are allowed, so just use the load op
- opList->setColorLoadOp(GrLoadOp::kClear, color);
+ opsTask->setColorLoadOp(GrLoadOp::kClear, color);
return;
} else {
// Will use an op for the clear, reset the load op to discard since the op will
// blow away the color buffer contents
- opList->setColorLoadOp(GrLoadOp::kDiscard);
+ opsTask->setColorLoadOp(GrLoadOp::kDiscard);
}
// Must add an op to the list (either because we couldn't use a load op, or because the
@@ -369,7 +364,7 @@
}
}
- // TODO: in a post-MDB world this should be handled at the OpList level.
+ // TODO: in a post-MDB world this should be handled at the OpsTask level.
// This makes sure to always add an op to the list, instead of marking the clear as a load op.
// This code follows very similar logic to internalClear() below, but critical differences are
// highlighted in line related to absClear()'s unique behavior.
@@ -394,9 +389,9 @@
}
} else {
// Reset the oplist like in internalClear(), but do not rely on a load op for the clear
- fRenderTargetContext->getRTOpList()->resetForFullscreenClear(
+ fRenderTargetContext->getOpsTask()->resetForFullscreenClear(
fRenderTargetContext->canDiscardPreviousOpsOnFullClear());
- fRenderTargetContext->getRTOpList()->setColorLoadOp(GrLoadOp::kDiscard);
+ fRenderTargetContext->getOpsTask()->setColorLoadOp(GrLoadOp::kDiscard);
if (fRenderTargetContext->caps()->performColorClearsAsDraws()) {
// This draws a quad covering the worst case dimensions instead of just the logical
@@ -789,11 +784,11 @@
*fRenderTargetContext->caps());
}
-GrRenderTargetOpList::CanDiscardPreviousOps GrRenderTargetContext::canDiscardPreviousOpsOnFullClear(
+GrOpsTask::CanDiscardPreviousOps GrRenderTargetContext::canDiscardPreviousOpsOnFullClear(
) const {
#if GR_TEST_UTILS
if (fPreserveOpsOnFullClear_TestingOnly) {
- return GrRenderTargetOpList::CanDiscardPreviousOps::kNo;
+ return GrOpsTask::CanDiscardPreviousOps::kNo;
}
#endif
// Regardless of how the clear is implemented (native clear or a fullscreen quad), all prior ops
@@ -802,7 +797,7 @@
// Although the clear will ignore the stencil buffer, following draw ops may not so we can't get
// rid of all the preceding ops. Beware! If we ever add any ops that have a side effect beyond
// modifying the stencil buffer we will need a more elaborate tracking system (skbug.com/7002).
- return GrRenderTargetOpList::CanDiscardPreviousOps(!fNumStencilSamples);
+ return GrOpsTask::CanDiscardPreviousOps(!fNumStencilSamples);
}
void GrRenderTargetContext::setNeedsStencil(bool multisampled) {
@@ -835,7 +830,7 @@
// code note when the instantiated stencil buffer is already clear and skip the clear
// altogether. And on tilers, loading the stencil buffer cleared is even faster than
// preserving the previous contents.
- this->getRTOpList()->setStencilLoadOp(GrLoadOp::kClear);
+ this->getOpsTask()->setStencilLoadOp(GrLoadOp::kClear);
}
}
}
@@ -2007,7 +2002,7 @@
kAdopt_GrWrapOwnership);
std::unique_ptr<GrOp> waitOp(GrSemaphoreOp::MakeWait(fContext, std::move(sema),
fRenderTargetProxy.get()));
- this->getRTOpList()->addWaitOp(
+ this->getOpsTask()->addWaitOp(
std::move(waitOp), GrTextureResolveManager(this->drawingManager()), *this->caps());
}
return true;
@@ -2284,7 +2279,7 @@
}
void GrRenderTargetContext::addOp(std::unique_ptr<GrOp> op) {
- this->getRTOpList()->addOp(
+ this->getOpsTask()->addOp(
std::move(op), GrTextureResolveManager(this->drawingManager()), *this->caps());
}
@@ -2340,12 +2335,12 @@
}
op->setClippedBounds(bounds);
- auto opList = this->getRTOpList();
+ auto opsTask = this->getOpsTask();
if (willAddFn) {
- willAddFn(op.get(), opList->uniqueID());
+ willAddFn(op.get(), opsTask->uniqueID());
}
- opList->addDrawOp(std::move(op), analysis, std::move(appliedClip), dstProxy,
- GrTextureResolveManager(this->drawingManager()), *this->caps());
+ opsTask->addDrawOp(std::move(op), analysis, std::move(appliedClip), dstProxy,
+ GrTextureResolveManager(this->drawingManager()), *this->caps());
}
bool GrRenderTargetContext::setupDstProxy(const GrClip& clip, const GrOp& op,
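The fullscreen-clear path shown in the hunks above either folds the clear into the render pass's color load op (when resetForFullscreenClear() empties the task and the caps allow native clears) or records an explicit clear op and downgrades the load op to discard. A standalone sketch of that decision, with simplified stand-in types rather than the real GrOpsTask/GrCaps API:

    #include <cstdint>

    // Sketch of the decision made in internalClear(); enums and hooks here are
    // illustrative stand-ins, not the actual Skia API.
    enum class LoadOp { kLoad, kClear, kDiscard };

    struct Caps { bool performColorClearsAsDraws = false; };

    struct OpsTaskSketch {
        LoadOp fColorLoadOp = LoadOp::kLoad;
        uint32_t fClearColor = 0;
        int fNumOps = 0;

        // Mirrors resetForFullscreenClear(): drop prior ops when allowed.
        bool resetForFullscreenClear(bool canDiscardPreviousOps) {
            if (canDiscardPreviousOps) {
                fNumOps = 0;
            }
            // A load op can only replace the clear if nothing is left in the task.
            return fNumOps == 0;
        }
    };

    void fullscreenClear(OpsTaskSketch* task, const Caps& caps, bool canDiscard, uint32_t color) {
        if (task->resetForFullscreenClear(canDiscard) && !caps.performColorClearsAsDraws) {
            // Task emptied and native clears allowed: fold the clear into the
            // render pass's color load op and record no op at all.
            task->fColorLoadOp = LoadOp::kClear;
            task->fClearColor = color;
            return;
        }
        // Otherwise a clear (or fullscreen-quad) op must be recorded; the load
        // op becomes discard because that op overwrites the color buffer anyway.
        task->fColorLoadOp = LoadOp::kDiscard;
        task->fNumOps++;  // stands in for recording the clear op
    }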
diff --git a/src/gpu/GrRenderTargetContext.h b/src/gpu/GrRenderTargetContext.h
index aa3281d..10094ec 100644
--- a/src/gpu/GrRenderTargetContext.h
+++ b/src/gpu/GrRenderTargetContext.h
@@ -14,8 +14,8 @@
#include "include/core/SkSurface.h"
#include "include/core/SkSurfaceProps.h"
#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrPaint.h"
-#include "src/gpu/GrRenderTargetOpList.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrSurfaceContext.h"
#include "src/gpu/GrXferProcessor.h"
@@ -502,12 +502,12 @@
#if GR_TEST_UTILS
bool testingOnly_IsInstantiated() const { return fRenderTargetProxy->isInstantiated(); }
void testingOnly_SetPreserveOpsOnFullClear() { fPreserveOpsOnFullClear_TestingOnly = true; }
- GrRenderTargetOpList* testingOnly_PeekLastOpList() { return fOpList.get(); }
+ GrOpsTask* testingOnly_PeekLastOpsTask() { return fOpsTask.get(); }
#endif
protected:
GrRenderTargetContext(GrRecordingContext*, sk_sp<GrRenderTargetProxy>, GrColorType,
- sk_sp<SkColorSpace>, const SkSurfaceProps*, bool managedOpList = true);
+ sk_sp<SkColorSpace>, const SkSurfaceProps*, bool managedOpsTask = true);
SkDEBUGCODE(void validate() const override;)
@@ -518,7 +518,7 @@
GrAAType chooseAAType(GrAA);
friend class GrAtlasTextBlob; // for access to add[Mesh]DrawOp
- friend class GrClipStackClip; // for access to getOpList
+ friend class GrClipStackClip; // for access to getOpsTask
friend class GrDrawingManager; // for ctor
friend class GrRenderTargetContextPriv;
@@ -541,7 +541,7 @@
std::unique_ptr<GrFragmentProcessor>,
sk_sp<GrTextureProxy>);
- GrRenderTargetOpList::CanDiscardPreviousOps canDiscardPreviousOpsOnFullClear() const;
+ GrOpsTask::CanDiscardPreviousOps canDiscardPreviousOpsOnFullClear() const;
void setNeedsStencil(bool multisampled);
void internalClear(const GrFixedClip&, const SkPMColor4f&, CanClearFullscreen);
@@ -603,7 +603,7 @@
void addOp(std::unique_ptr<GrOp>);
// Allows caller of addDrawOp to know which op list an op will be added to.
- using WillAddOpFn = void(GrOp*, uint32_t opListID);
+ using WillAddOpFn = void(GrOp*, uint32_t opsTaskID);
// These perform processing specific to GrDrawOp-derived ops before recording them into an
// op list. Before adding the op to an op list the WillAddOpFn is called. Note that it
// will not be called in the event that the op is discarded. Moreover, the op may merge into
@@ -621,18 +621,17 @@
void asyncReadPixels(const SkIRect& rect, SkColorType colorType, ReadPixelsCallback callback,
ReadPixelsContext context);
- GrRenderTargetOpList* getRTOpList();
- GrOpList* getOpList();
+ GrOpsTask* getOpsTask();
std::unique_ptr<GrTextTarget> fTextTarget;
sk_sp<GrRenderTargetProxy> fRenderTargetProxy;
- // In MDB-mode the GrOpList can be closed by some other renderTargetContext that has picked
- // it up. For this reason, the GrOpList should only ever be accessed via 'getOpList'.
- sk_sp<GrRenderTargetOpList> fOpList;
+ // In MDB-mode the GrOpsTask can be closed by some other renderTargetContext that has picked
+ // it up. For this reason, the GrOpsTask should only ever be accessed via 'getOpsTask'.
+ sk_sp<GrOpsTask> fOpsTask;
SkSurfaceProps fSurfaceProps;
- bool fManagedOpList;
+ bool fManagedOpsTask;
int fNumStencilSamples = 0;
#if GR_TEST_UTILS
diff --git a/src/gpu/GrRenderTargetContextPriv.h b/src/gpu/GrRenderTargetContextPriv.h
index f816ea7..1440ec2 100644
--- a/src/gpu/GrRenderTargetContextPriv.h
+++ b/src/gpu/GrRenderTargetContextPriv.h
@@ -8,9 +8,9 @@
#ifndef GrRenderTargetContextPriv_DEFINED
#define GrRenderTargetContextPriv_DEFINED
+#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrPathRendering.h"
#include "src/gpu/GrRenderTargetContext.h"
-#include "src/gpu/GrRenderTargetOpList.h"
class GrFixedClip;
class GrHardClip;
@@ -27,20 +27,20 @@
// TODO: remove after clipping overhaul.
void setLastClip(uint32_t clipStackGenID, const SkIRect& devClipBounds,
int numClipAnalyticFPs) {
- GrRenderTargetOpList* opList = fRenderTargetContext->getRTOpList();
- opList->fLastClipStackGenID = clipStackGenID;
- opList->fLastDevClipBounds = devClipBounds;
- opList->fLastClipNumAnalyticFPs = numClipAnalyticFPs;
+ GrOpsTask* opsTask = fRenderTargetContext->getOpsTask();
+ opsTask->fLastClipStackGenID = clipStackGenID;
+ opsTask->fLastDevClipBounds = devClipBounds;
+ opsTask->fLastClipNumAnalyticFPs = numClipAnalyticFPs;
}
// called to determine if we have to render the clip into SB.
// TODO: remove after clipping overhaul.
bool mustRenderClip(uint32_t clipStackGenID, const SkIRect& devClipBounds,
int numClipAnalyticFPs) const {
- GrRenderTargetOpList* opList = fRenderTargetContext->getRTOpList();
- return opList->fLastClipStackGenID != clipStackGenID ||
- !opList->fLastDevClipBounds.contains(devClipBounds) ||
- opList->fLastClipNumAnalyticFPs != numClipAnalyticFPs;
+ GrOpsTask* opsTask = fRenderTargetContext->getOpsTask();
+ return opsTask->fLastClipStackGenID != clipStackGenID ||
+ !opsTask->fLastDevClipBounds.contains(devClipBounds) ||
+ opsTask->fLastClipNumAnalyticFPs != numClipAnalyticFPs;
}
using CanClearFullscreen = GrRenderTargetContext::CanClearFullscreen;
@@ -104,7 +104,7 @@
return fRenderTargetContext->fRenderTargetProxy->uniqueID();
}
- uint32_t testingOnly_getOpListID();
+ uint32_t testingOnly_getOpsTaskID();
using WillAddOpFn = GrRenderTargetContext::WillAddOpFn;
void testingOnly_addDrawOp(std::unique_ptr<GrDrawOp>);
diff --git a/src/gpu/GrRenderTargetProxy.cpp b/src/gpu/GrRenderTargetProxy.cpp
index 6d831fa..8faa0a4 100644
--- a/src/gpu/GrRenderTargetProxy.cpp
+++ b/src/gpu/GrRenderTargetProxy.cpp
@@ -12,7 +12,7 @@
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrGpuResourcePriv.h"
-#include "src/gpu/GrRenderTargetOpList.h"
+#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfacePriv.h"
diff --git a/src/gpu/GrRenderTask.cpp b/src/gpu/GrRenderTask.cpp
index 0d54208..639f4cf 100644
--- a/src/gpu/GrRenderTask.cpp
+++ b/src/gpu/GrRenderTask.cpp
@@ -119,7 +119,7 @@
// Does this proxy have mipmaps that need to be regenerated?
if (GrMipMapped::kYes == mipMapped && textureProxy->mipMapsAreDirty()) {
- // Create an opList that resolves the texture's mipmap data.
+ // Create a renderTask that resolves the texture's mipmap data.
GrRenderTask* textureResolveTask = textureResolveManager.newTextureResolveRenderTask(
sk_ref_sp(textureProxy), GrTextureResolveFlags::kMipMaps, caps);
@@ -130,7 +130,7 @@
textureResolveTask->fDeferredProxies.back() == textureProxy);
// The GrTextureResolveRenderTask factory should have also marked the mipmaps clean, set the
- // last opList on the textureProxy to textureResolveTask, and closed textureResolveTask.
+ // last renderTask on the textureProxy to textureResolveTask, and closed textureResolveTask.
SkASSERT(!textureProxy->mipMapsAreDirty());
SkASSERT(textureProxy->getLastRenderTask() == textureResolveTask);
SkASSERT(textureResolveTask->isClosed());
diff --git a/src/gpu/GrRenderTask.h b/src/gpu/GrRenderTask.h
index 7b2c776..f13a0de 100644
--- a/src/gpu/GrRenderTask.h
+++ b/src/gpu/GrRenderTask.h
@@ -15,13 +15,12 @@
#include "src/gpu/GrTextureResolveManager.h"
class GrOpFlushState;
-class GrOpList;
-class GrRenderTargetOpList;
+class GrOpsTask;
class GrResourceAllocator;
// This class abstracts a task that targets a single GrSurfaceProxy, participates in the
// GrDrawingManager's DAG, and implements the onExecute method to modify its target proxy's
-// contents. (e.g., an opList that executes a command buffer, a task to regenerate mipmaps, etc.)
+// contents. (e.g., an opsTask that executes a command buffer, a task to regenerate mipmaps, etc.)
class GrRenderTask : public SkRefCnt {
public:
GrRenderTask(sk_sp<GrSurfaceProxy> target);
@@ -54,9 +53,9 @@
uint32_t uniqueID() const { return fUniqueID; }
/*
- * Safely cast this GrRenderTask to a GrRenderTargetOpList (if possible).
+ * Safely cast this GrRenderTask to a GrOpsTask (if possible).
*/
- virtual GrRenderTargetOpList* asRenderTargetOpList() { return nullptr; }
+ virtual GrOpsTask* asOpsTask() { return nullptr; }
/*
* Dump out the GrRenderTask dependency DAG
@@ -165,9 +164,9 @@
const uint32_t fUniqueID;
uint32_t fFlags;
- // 'this' GrOpList relies on the output of the GrOpLists in 'fDependencies'
+ // 'this' GrRenderTask relies on the output of the GrRenderTasks in 'fDependencies'
SkSTArray<1, GrRenderTask*, true> fDependencies;
- // 'this' GrOpList's output is relied on by the GrOpLists in 'fDependents'
+ // 'this' GrRenderTask's output is relied on by the GrRenderTasks in 'fDependents'
SkSTArray<1, GrRenderTask*, true> fDependents;
};
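asOpsTask() takes over from asRenderTargetOpList() as the checked downcast within the GrRenderTask hierarchy: the base class answers nullptr and only GrOpsTask answers itself, so callers never need an unchecked cast. A minimal sketch of the idiom with stand-in classes (not the real Skia types):

    #include <vector>

    class OpsTask;

    class RenderTask {
    public:
        virtual ~RenderTask() = default;
        // Returns nullptr unless the concrete task really is an OpsTask.
        virtual OpsTask* asOpsTask() { return nullptr; }
    };

    class OpsTask final : public RenderTask {
    public:
        OpsTask* asOpsTask() override { return this; }
        void addOp(int op) { fOps.push_back(op); }
    private:
        std::vector<int> fOps;
    };

    class MipmapResolveTask final : public RenderTask {};  // never casts to OpsTask

    // Only tasks that really are OpsTasks receive ops; everything else is skipped.
    void addOpToLastTask(RenderTask* last, int op) {
        if (OpsTask* opsTask = last ? last->asOpsTask() : nullptr) {
            opsTask->addOp(op);
        }
    }

This is the same shape GrSurfaceProxy::getLastOpsTask() relies on further down: the proxy stores only a GrRenderTask back-pointer and asks it for an OpsTask when one is needed.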
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index f73f5be..ecc9a42 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -9,7 +9,7 @@
#include "src/gpu/GrDeinstantiateProxyTracker.h"
#include "src/gpu/GrGpuResourcePriv.h"
-#include "src/gpu/GrOpList.h"
+#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrResourceProvider.h"
@@ -53,16 +53,17 @@
}
}
-void GrResourceAllocator::markEndOfOpList(int opListIndex) {
- SkASSERT(!fAssigned); // We shouldn't be adding any opLists after (or during) assignment
+void GrResourceAllocator::markEndOfOpsTask(int opsTaskIndex) {
+ SkASSERT(!fAssigned); // We shouldn't be adding any opsTasks after (or during) assignment
- SkASSERT(fEndOfOpListOpIndices.count() == opListIndex);
- if (!fEndOfOpListOpIndices.empty()) {
- SkASSERT(fEndOfOpListOpIndices.back() < this->curOp());
+ SkASSERT(fEndOfOpsTaskOpIndices.count() == opsTaskIndex);
+ if (!fEndOfOpsTaskOpIndices.empty()) {
+ SkASSERT(fEndOfOpsTaskOpIndices.back() < this->curOp());
}
- fEndOfOpListOpIndices.push_back(this->curOp()); // This is the first op index of the next opList
- SkASSERT(fEndOfOpListOpIndices.count() <= fNumOpLists);
+ // This is the first op index of the next opsTask
+ fEndOfOpsTaskOpIndices.push_back(this->curOp());
+ SkASSERT(fEndOfOpsTaskOpIndices.count() <= fNumOpsTasks);
}
GrResourceAllocator::~GrResourceAllocator() {
@@ -113,7 +114,7 @@
if (0 == start && 0 == end) {
// This interval is for the initial upload to a deferred proxy. Due to the vagaries
// of how deferred proxies are collected they can appear as uploads multiple times
- // in a single opLists' list and as uploads in several opLists.
+ // in a single opsTasks' list and as uploads in several opsTasks.
SkASSERT(0 == intvl->start());
} else if (isDirectDstRead) {
// Direct reads from the render target itself should occur w/in the existing
@@ -352,30 +353,30 @@
}
}
-bool GrResourceAllocator::onOpListBoundary() const {
+bool GrResourceAllocator::onOpsTaskBoundary() const {
if (fIntvlList.empty()) {
- SkASSERT(fCurOpListIndex+1 <= fNumOpLists);
- // Although technically on an opList boundary there is no need to force an
+ SkASSERT(fCurOpsTaskIndex+1 <= fNumOpsTasks);
+ // Although technically on an opsTask boundary there is no need to force an
// intermediate flush here
return false;
}
const Interval* tmp = fIntvlList.peekHead();
- return fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start();
+ return fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start();
}
void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
- *stopIndex = fCurOpListIndex+1;
+ *stopIndex = fCurOpsTaskIndex+1;
// This is interrupting the allocation of resources for this flush. We need to
// proactively clear the active interval list of any intervals that aren't
// guaranteed to survive the partial flush lest they become zombies (i.e.,
// holding a deleted surface proxy).
const Interval* tmp = fIntvlList.peekHead();
- SkASSERT(fEndOfOpListOpIndices[fCurOpListIndex] <= tmp->start());
+ SkASSERT(fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start());
- fCurOpListIndex++;
- SkASSERT(fCurOpListIndex < fNumOpLists);
+ fCurOpsTaskIndex++;
+ SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
this->expire(tmp->start());
}
@@ -385,28 +386,28 @@
*outError = fLazyInstantiationError ? AssignError::kFailedProxyInstantiation
: AssignError::kNoError;
- SkASSERT(fNumOpLists == fEndOfOpListOpIndices.count());
+ SkASSERT(fNumOpsTasks == fEndOfOpsTaskOpIndices.count());
fIntvlHash.reset(); // we don't need the interval hash anymore
- if (fCurOpListIndex >= fEndOfOpListOpIndices.count()) {
+ if (fCurOpsTaskIndex >= fEndOfOpsTaskOpIndices.count()) {
return false; // nothing to render
}
- *startIndex = fCurOpListIndex;
- *stopIndex = fEndOfOpListOpIndices.count();
+ *startIndex = fCurOpsTaskIndex;
+ *stopIndex = fEndOfOpsTaskOpIndices.count();
if (fIntvlList.empty()) {
- fCurOpListIndex = fEndOfOpListOpIndices.count();
+ fCurOpsTaskIndex = fEndOfOpsTaskOpIndices.count();
return true; // no resources to assign
}
#if GR_ALLOCATION_SPEW
- SkDebugf("assigning opLists %d through %d out of %d numOpLists\n",
- *startIndex, *stopIndex, fNumOpLists);
- SkDebugf("EndOfOpListIndices: ");
- for (int i = 0; i < fEndOfOpListOpIndices.count(); ++i) {
- SkDebugf("%d ", fEndOfOpListOpIndices[i]);
+ SkDebugf("assigning opsTasks %d through %d out of %d numOpsTasks\n",
+ *startIndex, *stopIndex, fNumOpsTasks);
+ SkDebugf("EndOfOpsTaskIndices: ");
+ for (int i = 0; i < fEndOfOpsTaskOpIndices.count(); ++i) {
+ SkDebugf("%d ", fEndOfOpsTaskOpIndices[i]);
}
SkDebugf("\n");
#endif
@@ -417,9 +418,9 @@
this->dumpIntervals();
#endif
while (Interval* cur = fIntvlList.popHead()) {
- if (fEndOfOpListOpIndices[fCurOpListIndex] <= cur->start()) {
- fCurOpListIndex++;
- SkASSERT(fCurOpListIndex < fNumOpLists);
+ if (fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= cur->start()) {
+ fCurOpsTaskIndex++;
+ SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
}
this->expire(cur->start());
@@ -437,8 +438,8 @@
fActiveIntvls.insertByIncreasingEnd(cur);
if (fResourceProvider->overBudget()) {
- // Only force intermediate draws on opList boundaries
- if (this->onOpListBoundary()) {
+ // Only force intermediate draws on opsTask boundaries
+ if (this->onOpsTaskBoundary()) {
this->forceIntermediateFlush(stopIndex);
return true;
}
@@ -484,8 +485,8 @@
fActiveIntvls.insertByIncreasingEnd(cur);
if (fResourceProvider->overBudget()) {
- // Only force intermediate draws on opList boundaries
- if (this->onOpListBoundary()) {
+ // Only force intermediate draws on opsTask boundaries
+ if (this->onOpsTaskBoundary()) {
this->forceIntermediateFlush(stopIndex);
return true;
}
diff --git a/src/gpu/GrResourceAllocator.h b/src/gpu/GrResourceAllocator.h
index b448c2d..0dc7b46 100644
--- a/src/gpu/GrResourceAllocator.h
+++ b/src/gpu/GrResourceAllocator.h
@@ -38,24 +38,24 @@
* adds the new interval to the active list (that is sorted by increasing end index)
*
* Note: the op indices (used in the usage intervals) come from the order of the ops in
- * their opLists after the opList DAG has been linearized.
+ * their opsTasks after the opsTask DAG has been linearized.
*
*************************************************************************************************
* How does instantiation failure handling work when explicitly allocating?
*
* In the gather usage intervals pass all the GrSurfaceProxies used in the flush should be
- * gathered (i.e., in GrOpList::gatherProxyIntervals).
+ * gathered (i.e., in GrOpsTask::gatherProxyIntervals).
*
* The allocator will churn through this list but could fail anywhere.
*
* Allocation failure handling occurs at two levels:
*
- * 1) If the GrSurface backing an opList fails to allocate then the entire opList is dropped.
+ * 1) If the GrSurface backing an opsTask fails to allocate then the entire opsTask is dropped.
*
* 2) If an individual GrSurfaceProxy fails to allocate then any ops that use it are dropped
- * (via GrOpList::purgeOpsWithUninstantiatedProxies)
+ * (via GrOpsTask::purgeOpsWithUninstantiatedProxies)
*
- * The pass to determine which ops to drop is a bit laborious so we only check the opLists and
+ * The pass to determine which ops to drop is a bit laborious so we only check the opsTasks and
* individual ops when something goes wrong in allocation (i.e., when the return code from
* GrResourceAllocator::assign is bad)
*
@@ -70,10 +70,10 @@
public:
GrResourceAllocator(GrResourceProvider* resourceProvider,
GrDeinstantiateProxyTracker* tracker
- SkDEBUGCODE(, int numOpLists))
+ SkDEBUGCODE(, int numOpsTasks))
: fResourceProvider(resourceProvider)
, fDeinstantiateTracker(tracker)
- SkDEBUGCODE(, fNumOpLists(numOpLists)) {
+ SkDEBUGCODE(, fNumOpsTasks(numOpsTasks)) {
}
~GrResourceAllocator();
@@ -82,7 +82,7 @@
void incOps() { fNumOps++; }
/** Indicates whether a given call to addInterval represents an actual usage of the
- * provided proxy. This is mainly here to accomodate deferred proxies attached to opLists.
+ * provided proxy. This is mainly here to accommodate deferred proxies attached to opsTasks.
* In that case we need to create an extra long interval for them (due to the upload) but
* don't want to count that usage/reference towards the proxy's recyclability.
*/
@@ -101,16 +101,16 @@
kFailedProxyInstantiation
};
- // Returns true when the opLists from 'startIndex' to 'stopIndex' should be executed;
+ // Returns true when the opsTasks from 'startIndex' to 'stopIndex' should be executed;
// false when nothing remains to be executed.
// If any proxy fails to instantiate, the AssignError will be set to kFailedProxyInstantiation.
// If this happens, the caller should remove all ops which reference an uninstantiated proxy.
- // This is used to execute a portion of the queued opLists in order to reduce the total
+ // This is used to execute a portion of the queued opsTasks in order to reduce the total
// amount of GPU resources required.
bool assign(int* startIndex, int* stopIndex, AssignError* outError);
void determineRecyclability();
- void markEndOfOpList(int opListIndex);
+ void markEndOfOpsTask(int opsTaskIndex);
#if GR_ALLOCATION_SPEW
void dumpIntervals();
@@ -122,7 +122,7 @@
// Remove dead intervals from the active list
void expire(unsigned int curIndex);
- bool onOpListBoundary() const;
+ bool onOpsTaskBoundary() const;
void forceIntermediateFlush(int* stopIndex);
// These two methods wrap the interactions with the free pool
@@ -269,9 +269,9 @@
IntervalList fActiveIntvls; // List of live intervals during assignment
// (sorted by increasing end)
unsigned int fNumOps = 0;
- SkTArray<unsigned int> fEndOfOpListOpIndices;
- int fCurOpListIndex = 0;
- SkDEBUGCODE(const int fNumOpLists = -1;)
+ SkTArray<unsigned int> fEndOfOpsTaskOpIndices;
+ int fCurOpsTaskIndex = 0;
+ SkDEBUGCODE(const int fNumOpsTasks = -1;)
SkDEBUGCODE(bool fAssigned = false;)
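fEndOfOpsTaskOpIndices records, for each opsTask, the first op index of the following task; that is how assign() and onOpsTaskBoundary() map a linear op index back to an opsTask and find safe points for intermediate flushes. A simplified sketch of that bookkeeping (illustrative only, not the real allocator):

    #include <cassert>
    #include <vector>

    // Ops are numbered globally in flush order; each opsTask records where the
    // next task's ops begin, mirroring markEndOfOpsTask()/onOpsTaskBoundary().
    class AllocatorSketch {
    public:
        void incOps() { ++fNumOps; }
        unsigned curOp() const { return fNumOps; }

        // Called once per opsTask, after its ops have been numbered.
        void markEndOfOpsTask(int opsTaskIndex) {
            assert((int)fEndOfOpsTaskOpIndices.size() == opsTaskIndex);
            // The current op count is the first op index of the *next* opsTask.
            fEndOfOpsTaskOpIndices.push_back(fNumOps);
        }

        // True when every op of the opsTask at 'taskIndex' lies below 'opIndex',
        // i.e. an interval starting at 'opIndex' belongs to a later opsTask.
        bool onOpsTaskBoundary(int taskIndex, unsigned opIndex) const {
            return fEndOfOpsTaskOpIndices[taskIndex] <= opIndex;
        }

    private:
        unsigned fNumOps = 0;
        std::vector<unsigned> fEndOfOpsTaskOpIndices;
    };

This is also why GrTextureResolveRenderTask and GrTransferFromRenderTask below add a fake op interval: every task must advance the shared index space so these boundaries stay in sync.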
diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h
index 4adb2fa..d3264d3 100644
--- a/src/gpu/GrResourceProvider.h
+++ b/src/gpu/GrResourceProvider.h
@@ -45,7 +45,7 @@
kNone = 0x0,
/** If the caller intends to do direct reads/writes to/from the CPU then this flag must be
- * set when accessing resources during a GrOpList flush. This includes the execution of
+ * set when accessing resources during a GrOpsTask flush. This includes the execution of
* GrOp objects. The reason is that these memory operations are done immediately and
* will occur out of order WRT the operations being flushed.
* Make this automatic: https://bug.skia.org/4156
diff --git a/src/gpu/GrSoftwarePathRenderer.cpp b/src/gpu/GrSoftwarePathRenderer.cpp
index a3d3f49..c562436 100644
--- a/src/gpu/GrSoftwarePathRenderer.cpp
+++ b/src/gpu/GrSoftwarePathRenderer.cpp
@@ -16,7 +16,6 @@
#include "src/gpu/GrDeferredProxyUploader.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrOpFlushState.h"
-#include "src/gpu/GrOpList.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContextPriv.h"
diff --git a/src/gpu/GrSurface.cpp b/src/gpu/GrSurface.cpp
index db5a707..df451a8 100644
--- a/src/gpu/GrSurface.cpp
+++ b/src/gpu/GrSurface.cpp
@@ -8,7 +8,6 @@
#include "include/gpu/GrContext.h"
#include "include/gpu/GrSurface.h"
#include "include/gpu/GrTexture.h"
-#include "src/gpu/GrOpList.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfacePriv.h"
diff --git a/src/gpu/GrSurfaceContext.cpp b/src/gpu/GrSurfaceContext.cpp
index dff02b0..3a4726b 100644
--- a/src/gpu/GrSurfaceContext.cpp
+++ b/src/gpu/GrSurfaceContext.cpp
@@ -15,7 +15,6 @@
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrDrawingManager.h"
#include "src/gpu/GrGpu.h"
-#include "src/gpu/GrOpList.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrSurfaceContextPriv.h"
@@ -28,10 +27,10 @@
SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(this->singleOwner());)
#define RETURN_FALSE_IF_ABANDONED if (this->fContext->priv().abandoned()) { return false; }
-// In MDB mode the reffing of the 'getLastOpList' call's result allows in-progress
-// GrOpLists to be picked up and added to by renderTargetContexts lower in the call
-// stack. When this occurs with a closed GrOpList, a new one will be allocated
-// when the renderTargetContext attempts to use it (via getOpList).
+// In MDB mode the reffing of the 'getLastOpsTask' call's result allows in-progress
+// GrOpsTasks to be picked up and added to by renderTargetContexts lower in the call
+// stack. When this occurs with a closed GrOpsTask, a new one will be allocated
+// when the renderTargetContext attempts to use it (via getOpsTask).
GrSurfaceContext::GrSurfaceContext(GrRecordingContext* context,
GrColorType colorType,
SkAlphaType alphaType,
diff --git a/src/gpu/GrSurfaceContext.h b/src/gpu/GrSurfaceContext.h
index 6bbeb72..fccc52b 100644
--- a/src/gpu/GrSurfaceContext.h
+++ b/src/gpu/GrSurfaceContext.h
@@ -19,7 +19,6 @@
class GrAuditTrail;
class GrDrawingManager;
-class GrOpList;
class GrRecordingContext;
class GrRenderTargetContext;
class GrRenderTargetProxy;
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index f9ec3c0..484087b 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -16,7 +16,7 @@
#include "src/gpu/GrClip.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrGpuResourcePriv.h"
-#include "src/gpu/GrOpList.h"
+#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrStencilAttachment.h"
@@ -107,7 +107,7 @@
}
GrSurfaceProxy::~GrSurfaceProxy() {
- // For this to be deleted the opList that held a ref on it (if there was one) must have been
+ // For this to be deleted the opsTask that held a ref on it (if there was one) must have been
// deleted. Which would have cleared out this back pointer.
SkASSERT(!fLastRenderTask);
}
@@ -302,8 +302,8 @@
fLastRenderTask = renderTask;
}
-GrRenderTargetOpList* GrSurfaceProxy::getLastRenderTargetOpList() {
- return fLastRenderTask ? fLastRenderTask->asRenderTargetOpList() : nullptr;
+GrOpsTask* GrSurfaceProxy::getLastOpsTask() {
+ return fLastRenderTask ? fLastRenderTask->asOpsTask() : nullptr;
}
int GrSurfaceProxy::worstCaseWidth() const {
diff --git a/src/gpu/GrSurfaceProxy.h b/src/gpu/GrSurfaceProxy.h
index 4c78019..5b0297d 100644
--- a/src/gpu/GrSurfaceProxy.h
+++ b/src/gpu/GrSurfaceProxy.h
@@ -18,8 +18,8 @@
class GrCaps;
class GrContext_Base;
+class GrOpsTask;
class GrRecordingContext;
-class GrRenderTargetOpList;
class GrRenderTargetProxy;
class GrRenderTask;
class GrResourceProvider;
@@ -281,7 +281,7 @@
void setLastRenderTask(GrRenderTask*);
GrRenderTask* getLastRenderTask() { return fLastRenderTask; }
- GrRenderTargetOpList* getLastRenderTargetOpList();
+ GrOpsTask* getLastOpsTask();
/**
* Retrieves the amount of GPU memory that will be or currently is used by this resource
@@ -439,12 +439,12 @@
// the instantiation method.
mutable size_t fGpuMemorySize;
- // The last opList that wrote to or is currently going to write to this surface
- // The opList can be closed (e.g., no surface context is currently bound
+ // The last GrRenderTask that wrote to or is currently going to write to this surface
+ // The GrRenderTask can be closed (e.g., no surface context is currently bound
// to this proxy).
    // This back-pointer is required so that we can add a dependency between
- // the opList used to create the current contents of this surface
- // and the opList of a destination surface to which this one is being drawn or copied.
+ // the GrRenderTask used to create the current contents of this surface
+ // and the GrRenderTask of a destination surface to which this one is being drawn or copied.
// This pointer is unreffed. GrRenderTasks own a ref on their surface proxies.
GrRenderTask* fLastRenderTask;
diff --git a/src/gpu/GrTextureProxy.h b/src/gpu/GrTextureProxy.h
index 986975b..5870510 100644
--- a/src/gpu/GrTextureProxy.h
+++ b/src/gpu/GrTextureProxy.h
@@ -145,7 +145,7 @@
// This tracks the mipmap status at the proxy level and is thus somewhat distinct from the
// backing GrTexture's mipmap status. In particular, this status is used to determine when
- // mipmap levels need to be explicitly regenerated during the execution of a DAG of opLists.
+ // mipmap levels need to be explicitly regenerated during the execution of a DAG of opsTasks.
GrMipMapsStatus fMipMapsStatus;
// TEMPORARY: We are in the process of moving GrMipMapsStatus from the texture to the proxy.
// We track the fInitialMipMapsStatus here so we can assert that the proxy did indeed expect
diff --git a/src/gpu/GrTextureResolveRenderTask.cpp b/src/gpu/GrTextureResolveRenderTask.cpp
index 613f6bd..1e52b87 100644
--- a/src/gpu/GrTextureResolveRenderTask.cpp
+++ b/src/gpu/GrTextureResolveRenderTask.cpp
@@ -27,7 +27,7 @@
textureProxyPtr, GrMipMapped::kNo, GrTextureResolveManager(nullptr), caps);
textureProxyPtr->setLastRenderTask(resolveTask.get());
- // We only resolve the texture; nobody should try to do anything else with this opList.
+ // We only resolve the texture; nobody should try to do anything else with this opsTask.
resolveTask->makeClosed(caps);
if (GrTextureResolveFlags::kMipMaps & flags) {
@@ -41,7 +41,7 @@
void GrTextureResolveRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
// This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
- // fEndOfOpListOpIndices will remain in sync), so we create a fake op# to capture the fact that
+ // fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
// we manipulate fTarget.
alloc->addInterval(fTarget.get(), alloc->curOp(), alloc->curOp(),
GrResourceAllocator::ActualUse::kYes);
diff --git a/src/gpu/GrTransferFromRenderTask.cpp b/src/gpu/GrTransferFromRenderTask.cpp
index 3e6dfc5..7826bce 100644
--- a/src/gpu/GrTransferFromRenderTask.cpp
+++ b/src/gpu/GrTransferFromRenderTask.cpp
@@ -13,7 +13,7 @@
void GrTransferFromRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
// This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
- // fEndOfOpListOpIndices will remain in sync), so we create a fake op# to capture the fact that
+ // fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
// we read fSrcProxy.
alloc->addInterval(fSrcProxy.get(), alloc->curOp(), alloc->curOp(),
GrResourceAllocator::ActualUse::kYes);
diff --git a/src/gpu/GrUserStencilSettings.h b/src/gpu/GrUserStencilSettings.h
index b89a9ec..83a19b7 100644
--- a/src/gpu/GrUserStencilSettings.h
+++ b/src/gpu/GrUserStencilSettings.h
@@ -13,12 +13,12 @@
/**
* Gr uses the stencil buffer to implement complex clipping inside the
- * GrOpList class. The GrOpList makes a subset of the stencil buffer
+ * GrOpsTask class. The GrOpsTask makes a subset of the stencil buffer
* bits available for other uses by external code (user bits). Client code can
- * modify these bits. GrOpList will ignore ref, mask, and writemask bits
+ * modify these bits. GrOpsTask will ignore ref, mask, and writemask bits
* provided by clients that fall outside the user range.
*
- * When code outside the GrOpList class uses the stencil buffer the contract
+ * When code outside the GrOpsTask class uses the stencil buffer the contract
* is as follows:
*
* > Normal stencil funcs allow the client to pass / fail regardless of the
diff --git a/src/gpu/ccpr/GrCCClipPath.h b/src/gpu/ccpr/GrCCClipPath.h
index 1494d2c6..32e5ae4 100644
--- a/src/gpu/ccpr/GrCCClipPath.h
+++ b/src/gpu/ccpr/GrCCClipPath.h
@@ -19,7 +19,7 @@
/**
* These are keyed by SkPath generation ID, and store which device-space paths are accessed and
- * where by clip FPs in a given opList. A single GrCCClipPath can be referenced by multiple FPs. At
+ * where by clip FPs in a given opsTask. A single GrCCClipPath can be referenced by multiple FPs. At
* flush time their coverage count masks are packed into atlas(es) alongside normal DrawPathOps.
*/
class GrCCClipPath {
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 0a6433c..4ad6cd9 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -111,7 +111,7 @@
// If the path is clipped, CCPR will only draw the visible portion. This helps improve batching,
// since it eliminates the need for scissor when drawing to the main canvas.
// FIXME: We should parse the path right here. It will provide a tighter bounding box for us to
- // give the opList, as well as enabling threaded parsing when using DDL.
+ // give the opsTask, as well as enabling threaded parsing when using DDL.
SkRect clippedDrawBounds;
if (!clippedDrawBounds.intersect(conservativeDevBounds, SkRect::Make(maskDevIBounds))) {
clippedDrawBounds.setEmpty();
@@ -122,9 +122,9 @@
}
GrCCDrawPathsOp::~GrCCDrawPathsOp() {
- if (fOwningPerOpListPaths) {
+ if (fOwningPerOpsTaskPaths) {
// Remove the list's dangling pointer to this Op before deleting it.
- fOwningPerOpListPaths->fDrawOps.remove(this);
+ fOwningPerOpsTaskPaths->fDrawOps.remove(this);
}
}
@@ -195,9 +195,10 @@
GrOp::CombineResult GrCCDrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps&) {
GrCCDrawPathsOp* that = op->cast<GrCCDrawPathsOp>();
- SkASSERT(fOwningPerOpListPaths);
+ SkASSERT(fOwningPerOpsTaskPaths);
SkASSERT(fNumDraws);
- SkASSERT(!that->fOwningPerOpListPaths || that->fOwningPerOpListPaths == fOwningPerOpListPaths);
+ SkASSERT(!that->fOwningPerOpsTaskPaths ||
+ that->fOwningPerOpsTaskPaths == fOwningPerOpsTaskPaths);
SkASSERT(that->fNumDraws);
if (fProcessors != that->fProcessors ||
@@ -205,18 +206,18 @@
return CombineResult::kCannotCombine;
}
- fDraws.append(std::move(that->fDraws), &fOwningPerOpListPaths->fAllocator);
+ fDraws.append(std::move(that->fDraws), &fOwningPerOpsTaskPaths->fAllocator);
SkDEBUGCODE(fNumDraws += that->fNumDraws);
SkDEBUGCODE(that->fNumDraws = 0);
return CombineResult::kMerged;
}
-void GrCCDrawPathsOp::addToOwningPerOpListPaths(sk_sp<GrCCPerOpListPaths> owningPerOpListPaths) {
+void GrCCDrawPathsOp::addToOwningPerOpsTaskPaths(sk_sp<GrCCPerOpsTaskPaths> owningPerOpsTaskPaths) {
SkASSERT(1 == fNumDraws);
- SkASSERT(!fOwningPerOpListPaths);
- fOwningPerOpListPaths = std::move(owningPerOpListPaths);
- fOwningPerOpListPaths->fDrawOps.addToTail(this);
+ SkASSERT(!fOwningPerOpsTaskPaths);
+ fOwningPerOpsTaskPaths = std::move(owningPerOpsTaskPaths);
+ fOwningPerOpsTaskPaths->fDrawOps.addToTail(this);
}
void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
@@ -414,9 +415,9 @@
}
void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
- SkASSERT(fOwningPerOpListPaths);
+ SkASSERT(fOwningPerOpsTaskPaths);
- const GrCCPerFlushResources* resources = fOwningPerOpListPaths->fFlushResources.get();
+ const GrCCPerFlushResources* resources = fOwningPerOpsTaskPaths->fFlushResources.get();
if (!resources) {
return; // Setup failed.
}
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
index 1499c28..d0c4777 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -17,7 +17,7 @@
class GrCCAtlas;
class GrCCPerFlushResources;
struct GrCCPerFlushResourceSpecs;
-struct GrCCPerOpListPaths;
+struct GrCCPerOpsTaskPaths;
class GrOnFlushResourceProvider;
class GrRecordingContext;
@@ -46,7 +46,7 @@
}
void onPrepare(GrOpFlushState*) override {}
- void addToOwningPerOpListPaths(sk_sp<GrCCPerOpListPaths> owningPerOpListPaths);
+ void addToOwningPerOpsTaskPaths(sk_sp<GrCCPerOpsTaskPaths> owningPerOpsTaskPaths);
// Makes decisions about how to draw each path (cached, copied, rendered, etc.), and
// increments/fills out the corresponding GrCCPerFlushResourceSpecs.
@@ -125,9 +125,9 @@
friend class GrCCSTLList<SingleDraw>; // To access fNext.
};
- // Declare fOwningPerOpListPaths first, before fDraws. The draws use memory allocated by
- // fOwningPerOpListPaths, so it must not be unreffed until after fDraws is destroyed.
- sk_sp<GrCCPerOpListPaths> fOwningPerOpListPaths;
+ // Declare fOwningPerOpsTaskPaths first, before fDraws. The draws use memory allocated by
+ // fOwningPerOpsTaskPaths, so it must not be unreffed until after fDraws is destroyed.
+ sk_sp<GrCCPerOpsTaskPaths> fOwningPerOpsTaskPaths;
GrCCSTLList<SingleDraw> fDraws;
SkDEBUGCODE(int fNumDraws = 1);
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.h b/src/gpu/ccpr/GrCCPerFlushResources.h
index 59122fe..f2504e6 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.h
+++ b/src/gpu/ccpr/GrCCPerFlushResources.h
@@ -62,8 +62,8 @@
/**
* This class wraps all the GPU resources that CCPR builds at flush time. It is allocated in CCPR's
- * preFlush() method, and referenced by all the GrCCPerOpListPaths objects that are being flushed.
- * It is deleted in postFlush() once all the flushing GrCCPerOpListPaths objects are deleted.
+ * preFlush() method, and referenced by all the GrCCPerOpsTaskPaths objects that are being flushed.
+ * It is deleted in postFlush() once all the flushing GrCCPerOpsTaskPaths objects are deleted.
*/
class GrCCPerFlushResources : public GrNonAtomicRef<GrCCPerFlushResources> {
public:
diff --git a/src/gpu/ccpr/GrCCPerOpListPaths.h b/src/gpu/ccpr/GrCCPerOpsTaskPaths.h
similarity index 78%
rename from src/gpu/ccpr/GrCCPerOpListPaths.h
rename to src/gpu/ccpr/GrCCPerOpsTaskPaths.h
index e0dd115..ff8a224 100644
--- a/src/gpu/ccpr/GrCCPerOpListPaths.h
+++ b/src/gpu/ccpr/GrCCPerOpsTaskPaths.h
@@ -5,8 +5,8 @@
* found in the LICENSE file.
*/
-#ifndef GrCCPerOpListPaths_DEFINED
-#define GrCCPerOpListPaths_DEFINED
+#ifndef GrCCPerOpsTaskPaths_DEFINED
+#define GrCCPerOpsTaskPaths_DEFINED
#include "include/core/SkRefCnt.h"
#include "src/core/SkArenaAlloc.h"
@@ -19,10 +19,10 @@
class GrCCPerFlushResources;
/**
- * Tracks all the CCPR paths in a given opList that will be drawn when it flushes.
+ * Tracks all the CCPR paths in a given opsTask that will be drawn when it flushes.
*/
// DDL TODO: given the usage pattern in DDL mode, this could probably be non-atomic refcounting.
-struct GrCCPerOpListPaths : public SkRefCnt {
+struct GrCCPerOpsTaskPaths : public SkRefCnt {
SkTInternalLList<GrCCDrawPathsOp> fDrawOps; // This class does not own these ops.
std::map<uint32_t, GrCCClipPath> fClipPaths;
SkSTArenaAlloc<10 * 1024> fAllocator{10 * 1024 * 2};
diff --git a/src/gpu/ccpr/GrCCSTLList.h b/src/gpu/ccpr/GrCCSTLList.h
index eb75863..29f26b9 100644
--- a/src/gpu/ccpr/GrCCSTLList.h
+++ b/src/gpu/ccpr/GrCCSTLList.h
@@ -14,8 +14,8 @@
/**
* A singly-linked list whose head element is a local class member. This is required by
- * GrCCDrawPathsOp because the owning opList is unknown at the time of creation, so we can't use its
- * associated allocator to create the first element.
+ * GrCCDrawPathsOp because the owning opsTask is unknown at the time of creation, so we can't use
+ * its associated allocator to create the first element.
*/
template<typename T> class GrCCSTLList : SkNoncopyable {
public:
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index a9e080b..4798b6f 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -71,11 +71,11 @@
}
}
-GrCCPerOpListPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opListID) {
- auto it = fPendingPaths.find(opListID);
+GrCCPerOpsTaskPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opsTaskID) {
+ auto it = fPendingPaths.find(opsTaskID);
if (fPendingPaths.end() == it) {
- sk_sp<GrCCPerOpListPaths> paths = sk_make_sp<GrCCPerOpListPaths>();
- it = fPendingPaths.insert(std::make_pair(opListID, std::move(paths))).first;
+ sk_sp<GrCCPerOpsTaskPaths> paths = sk_make_sp<GrCCPerOpsTaskPaths>();
+ it = fPendingPaths.insert(std::make_pair(opsTaskID, std::move(paths))).first;
}
return it->second.get();
}
@@ -182,16 +182,17 @@
void GrCoverageCountingPathRenderer::recordOp(std::unique_ptr<GrCCDrawPathsOp> op,
const DrawPathArgs& args) {
if (op) {
- auto addToOwningPerOpListPaths = [this](GrOp* op, uint32_t opListID) {
- op->cast<GrCCDrawPathsOp>()->addToOwningPerOpListPaths(
- sk_ref_sp(this->lookupPendingPaths(opListID)));
+ auto addToOwningPerOpsTaskPaths = [this](GrOp* op, uint32_t opsTaskID) {
+ op->cast<GrCCDrawPathsOp>()->addToOwningPerOpsTaskPaths(
+ sk_ref_sp(this->lookupPendingPaths(opsTaskID)));
};
- args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op), addToOwningPerOpListPaths);
+ args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op),
+ addToOwningPerOpsTaskPaths);
}
}
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
- uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ uint32_t opsTaskID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
const GrCaps& caps) {
SkASSERT(!fFlushing);
@@ -202,7 +203,7 @@
key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
}
GrCCClipPath& clipPath =
- this->lookupPendingPaths(opListID)->fClipPaths[key];
+ this->lookupPendingPaths(opsTaskID)->fClipPaths[key];
if (!clipPath.isInitialized()) {
// This ClipPath was just created during lookup. Initialize it.
const SkRect& pathDevBounds = deviceSpacePath.getBounds();
@@ -228,8 +229,8 @@
void GrCoverageCountingPathRenderer::preFlush(
GrOnFlushResourceProvider* onFlushRP,
- const uint32_t* opListIDs,
- int numOpListIDs,
+ const uint32_t* opsTaskIDs,
+ int numOpsTaskIDs,
SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) {
using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
SkASSERT(!fFlushing);
@@ -251,13 +252,13 @@
specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(512, maxPreferredRTSize);
- // Move the per-opList paths that are about to be flushed from fPendingPaths to fFlushingPaths,
+ // Move the per-opsTask paths that are about to be flushed from fPendingPaths to fFlushingPaths,
// and count them up so we can preallocate buffers.
- fFlushingPaths.reserve(numOpListIDs);
- for (int i = 0; i < numOpListIDs; ++i) {
- auto iter = fPendingPaths.find(opListIDs[i]);
+ fFlushingPaths.reserve(numOpsTaskIDs);
+ for (int i = 0; i < numOpsTaskIDs; ++i) {
+ auto iter = fPendingPaths.find(opsTaskIDs[i]);
if (fPendingPaths.end() == iter) {
- continue; // No paths on this opList.
+ continue; // No paths on this opsTask.
}
fFlushingPaths.push_back(std::move(iter->second));
@@ -318,8 +319,8 @@
}
}
-void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
- int numOpListIDs) {
+void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs,
+ int numOpsTaskIDs) {
SkASSERT(fFlushing);
if (!fFlushingPaths.empty()) {
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 0450918..4a7ca18 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -10,10 +10,10 @@
#include <map>
#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrPathRenderer.h"
-#include "src/gpu/GrRenderTargetOpList.h"
#include "src/gpu/ccpr/GrCCPerFlushResources.h"
-#include "src/gpu/ccpr/GrCCPerOpListPaths.h"
+#include "src/gpu/ccpr/GrCCPerOpsTaskPaths.h"
class GrCCDrawPathsOp;
class GrCCPathCache;
@@ -41,16 +41,16 @@
CoverageType coverageType() const { return fCoverageType; }
- using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
+ using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpsTaskPaths>>;
- // In DDL mode, Ganesh needs to be able to move the pending GrCCPerOpListPaths to the DDL object
- // (detachPendingPaths) and then return them upon replay (mergePendingPaths).
+ // In DDL mode, Ganesh needs to be able to move the pending GrCCPerOpsTaskPaths to the DDL
+ // object (detachPendingPaths) and then return them upon replay (mergePendingPaths).
PendingPathsMap detachPendingPaths() { return std::move(fPendingPaths); }
void mergePendingPaths(const PendingPathsMap& paths) {
#ifdef SK_DEBUG
- // Ensure there are no duplicate opList IDs between the incoming path map and ours.
- // This should always be true since opList IDs are globally unique and these are coming
+ // Ensure there are no duplicate opsTask IDs between the incoming path map and ours.
+ // This should always be true since opsTask IDs are globally unique and these are coming
// from different DDL recordings.
for (const auto& it : paths) {
SkASSERT(!fPendingPaths.count(it.first));
@@ -65,9 +65,9 @@
const GrCaps&);
// GrOnFlushCallbackObject overrides.
- void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
+ void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs, int numOpsTaskIDs,
SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) override;
- void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
+ void postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs, int numOpsTaskIDs) override;
void purgeCacheEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point&);
@@ -94,19 +94,19 @@
CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
bool onDrawPath(const DrawPathArgs&) override;
- GrCCPerOpListPaths* lookupPendingPaths(uint32_t opListID);
+ GrCCPerOpsTaskPaths* lookupPendingPaths(uint32_t opsTaskID);
void recordOp(std::unique_ptr<GrCCDrawPathsOp>, const DrawPathArgs&);
const CoverageType fCoverageType;
- // fPendingPaths holds the GrCCPerOpListPaths objects that have already been created, but not
- // flushed, and those that are still being created. All GrCCPerOpListPaths objects will first
+ // fPendingPaths holds the GrCCPerOpsTaskPaths objects that have already been created, but not
+ // flushed, and those that are still being created. All GrCCPerOpsTaskPaths objects will first
// reside in fPendingPaths, then be moved to fFlushingPaths during preFlush().
PendingPathsMap fPendingPaths;
- // fFlushingPaths holds the GrCCPerOpListPaths objects that are currently being flushed.
+ // fFlushingPaths holds the GrCCPerOpsTaskPaths objects that are currently being flushed.
// (It will only contain elements when fFlushing is true.)
- SkSTArray<4, sk_sp<GrCCPerOpListPaths>> fFlushingPaths;
+ SkSTArray<4, sk_sp<GrCCPerOpsTaskPaths>> fFlushingPaths;
std::unique_ptr<GrCCPathCache> fPathCache;
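
The detachPendingPaths()/mergePendingPaths() comments in the header hunks above describe the DDL handoff. A minimal sketch of that pattern, assuming hypothetical names and std::shared_ptr in place of sk_sp (this is not the real Skia class):

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <memory>
    #include <utility>

    struct PerOpsTaskPaths {};
    using PendingPathsMap = std::map<uint32_t, std::shared_ptr<PerOpsTaskPaths>>;

    class PathRendererSketch {
    public:
        // Move the not-yet-flushed per-opsTask paths out so the DDL can own
        // them between recording and replay.
        PendingPathsMap detachPendingPaths() { return std::move(fPendingPaths); }

        // On replay, fold the DDL's paths back in. opsTask IDs are globally
        // unique, so the two maps should never share a key.
        void mergePendingPaths(const PendingPathsMap& paths) {
            for (const auto& it : paths) {
                assert(fPendingPaths.count(it.first) == 0);  // no duplicate opsTask IDs
            }
            fPendingPaths.insert(paths.begin(), paths.end());
        }

    private:
        PendingPathsMap fPendingPaths;
    };
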
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
index 0b61e4b..1b2a55e 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
@@ -17,7 +17,7 @@
}
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
- uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ uint32_t opsTaskID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
const GrCaps& caps) {
return nullptr;
}
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 0cbef8b..479ca28 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -1879,7 +1879,7 @@
if (rt && rt->needsResolve()) {
this->resolveRenderTarget(rt);
// TEMPORARY: MSAA resolve will have dirtied mipmaps. This goes away once we switch
- // to resolving MSAA from the opList as well.
+ // to resolving MSAA from the opsTask as well.
if (GrSamplerState::Filter::kMipMap == sampler.filter() &&
(tex->width() != 1 || tex->height() != 1)) {
SkASSERT(tex->texturePriv().mipMapped() == GrMipMapped::kYes);
@@ -1887,7 +1887,7 @@
this->regenerateMipMapLevels(tex);
}
}
- // Ensure mipmaps were all resolved ahead of time by the opList.
+ // Ensure mipmaps were all resolved ahead of time by the opsTask.
if (GrSamplerState::Filter::kMipMap == sampler.filter() &&
(tex->width() != 1 || tex->height() != 1)) {
// There are some cases where we might be given a non-mipmapped texture with a mipmap
@@ -2165,7 +2165,7 @@
#else
// we could just clear the clip bit but when we go through
// ANGLE a partial stencil mask will cause clears to be
- // turned into draws. Our contract on GrOpList says that
+ // turned into draws. Our contract on GrOpsTask says that
// changing the clip between stencil passes may or may not
// zero the client's clip bits. So we just clear the whole thing.
static const GrGLint clipStencilMask = ~0;
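
The GrGLGpu.cpp comments above describe the pre-sampling dance: resolve a dirty MSAA render target first (which dirties the mip chain), then regenerate mips before a mipmapped sampler reads a texture larger than 1x1. A purely illustrative sketch of that control flow, with all types and callbacks as hypothetical stand-ins rather than GrGLGpu internals:

    // Stand-in for a texture that is also an MSAA render target.
    struct TextureSketch {
        int width = 0, height = 0;
        bool isMSAATargetNeedingResolve = false;
        bool mipsDirty = false;
    };

    enum class Filter { kNearest, kBilerp, kMipMap };

    void prepareForSampling(TextureSketch* tex, Filter filter,
                            void (*resolveMSAA)(TextureSketch*),
                            void (*regenerateMips)(TextureSketch*)) {
        if (tex->isMSAATargetNeedingResolve) {
            resolveMSAA(tex);      // resolving dirties the mip chain
            tex->mipsDirty = true;
        }
        // 1x1 textures have no mip chain to regenerate.
        bool needsMips = (filter == Filter::kMipMap) &&
                         (tex->width != 1 || tex->height != 1);
        if (needsMips && tex->mipsDirty) {
            regenerateMips(tex);
            tex->mipsDirty = false;
        }
    }
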
diff --git a/src/gpu/ops/GrOp.h b/src/gpu/ops/GrOp.h
index f94e041..7aff59d 100644
--- a/src/gpu/ops/GrOp.h
+++ b/src/gpu/ops/GrOp.h
@@ -21,7 +21,6 @@
class GrCaps;
class GrGpuCommandBuffer;
class GrOpFlushState;
-class GrRenderTargetOpList;
/**
* GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
diff --git a/src/gpu/ops/GrSmallPathRenderer.h b/src/gpu/ops/GrSmallPathRenderer.h
index d7aa781..e7017b1 100644
--- a/src/gpu/ops/GrSmallPathRenderer.h
+++ b/src/gpu/ops/GrSmallPathRenderer.h
@@ -41,7 +41,7 @@
}
void postFlush(GrDeferredUploadToken startTokenForNextFlush,
- const uint32_t* /*opListIDs*/, int /*numOpListIDs*/) override {
+ const uint32_t* /*opsTaskIDs*/, int /*numOpsTaskIDs*/) override {
if (fAtlas) {
fAtlas->compact(startTokenForNextFlush);
}
diff --git a/src/gpu/text/GrAtlasManager.h b/src/gpu/text/GrAtlasManager.h
index 026806b..650b905 100644
--- a/src/gpu/text/GrAtlasManager.h
+++ b/src/gpu/text/GrAtlasManager.h
@@ -98,7 +98,7 @@
}
void postFlush(GrDeferredUploadToken startTokenForNextFlush,
- const uint32_t* opListIDs, int numOpListIDs) override {
+ const uint32_t* opsTaskIDs, int numOpsTaskIDs) override {
for (int i = 0; i < kMaskFormatCount; ++i) {
if (fAtlases[i]) {
fAtlases[i]->compact(startTokenForNextFlush);
diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.cpp b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
index 80e4340..8f5309e 100644
--- a/src/gpu/vk/GrVkGpuCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkGpuCommandBuffer.cpp
@@ -637,7 +637,7 @@
if (texRT && texRT->needsResolve()) {
fGpu->resolveRenderTargetNoFlush(texRT);
// TEMPORARY: MSAA resolve will have dirtied mipmaps. This goes away once we switch
- // to resolving MSAA from the opList as well.
+ // to resolving MSAA from the opsTask as well.
if (GrSamplerState::Filter::kMipMap == filter &&
(vkTexture->width() != 1 || vkTexture->height() != 1)) {
SkASSERT(vkTexture->texturePriv().mipMapped() == GrMipMapped::kYes);
@@ -646,7 +646,7 @@
}
}
- // Ensure mip maps were all resolved ahead of time by the opList.
+ // Ensure mip maps were all resolved ahead of time by the opsTask.
if (GrSamplerState::Filter::kMipMap == filter &&
(vkTexture->width() != 1 || vkTexture->height() != 1)) {
SkASSERT(vkTexture->texturePriv().mipMapped() == GrMipMapped::kYes);