Reland "Merge GrOpList and GrRTOpList and rename to GrOpsTask."
This reverts commit f21bf9e50bb175eb151e90a01d7f8351da0802f8.
Reason for revert: relanding with infra fix
Original change's description:
> Revert "Merge GrOpList and GrRTOpList and rename to GrOpsTask."
>
> This reverts commit 2a5954140b49d18e5161a30a4ae2c7ac28bc1993.
>
> Reason for revert: breaking everything
>
> Original change's description:
> > Merge GrOpList and GrRTOpList and rename to GrOpsTask.
> >
> > Change-Id: I8f4f2218a30fd0541a8f79f7bb9850f9500cd243
> > Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236343
> > Commit-Queue: Greg Daniel <egdaniel@google.com>
> > Reviewed-by: Brian Salomon <bsalomon@google.com>
>
> TBR=egdaniel@google.com,bsalomon@google.com,robertphillips@google.com
>
> Change-Id: I27840ea0343e8e6b388556afb7bd2e76386d611d
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236349
> Reviewed-by: Greg Daniel <egdaniel@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>

TBR=egdaniel@google.com,bsalomon@google.com,robertphillips@google.com

Change-Id: Ibd3a06e4a91dbb1f225dcc8d17d0db3967b6f85f
No-Presubmit: true
No-Tree-Checks: true
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/236350
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
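
The patch below is largely a mechanical rename (opList -> opsTask) plus the header merge. For orientation, the central bookkeeping it touches in CCPR is a map from opsTask ID to per-task path state, looked up and lazily created as in GrCoverageCountingPathRenderer::lookupPendingPaths(). The following is a minimal standalone sketch of that pattern only, not the actual Skia code: the names (PendingPathTracker, PerOpsTaskPaths member) are illustrative, and std::unique_ptr stands in for Skia's sk_sp.

    // Minimal sketch (illustrative, not Skia code): lazily create per-opsTask
    // path state keyed by the opsTask's ID, mirroring lookupPendingPaths().
    #include <cstdint>
    #include <map>
    #include <memory>

    struct PerOpsTaskPaths {
        // Stand-in for the draw-op list, clip-path map, and arena in the real struct.
        int pendingDrawCount = 0;
    };

    class PendingPathTracker {
    public:
        // Returns the state for 'opsTaskID', creating it on first use.
        PerOpsTaskPaths* lookupPendingPaths(uint32_t opsTaskID) {
            auto it = fPendingPaths.find(opsTaskID);
            if (it == fPendingPaths.end()) {
                auto paths = std::make_unique<PerOpsTaskPaths>();
                it = fPendingPaths.emplace(opsTaskID, std::move(paths)).first;
            }
            return it->second.get();
        }

    private:
        std::map<uint32_t, std::unique_ptr<PerOpsTaskPaths>> fPendingPaths;
    };

In the real code these entries live in fPendingPaths until flush, when preFlush() moves the affected ones into fFlushingPaths, as shown in the GrCoverageCountingPathRenderer.cpp hunks below.
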
diff --git a/src/gpu/ccpr/GrCCClipPath.h b/src/gpu/ccpr/GrCCClipPath.h
index 1494d2c6..32e5ae4 100644
--- a/src/gpu/ccpr/GrCCClipPath.h
+++ b/src/gpu/ccpr/GrCCClipPath.h
@@ -19,7 +19,7 @@
/**
* These are keyed by SkPath generation ID, and store which device-space paths are accessed and
- * where by clip FPs in a given opList. A single GrCCClipPath can be referenced by multiple FPs. At
+ * where by clip FPs in a given opsTask. A single GrCCClipPath can be referenced by multiple FPs. At
* flush time their coverage count masks are packed into atlas(es) alongside normal DrawPathOps.
*/
class GrCCClipPath {
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 0a6433c..4ad6cd9 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -111,7 +111,7 @@
// If the path is clipped, CCPR will only draw the visible portion. This helps improve batching,
// since it eliminates the need for scissor when drawing to the main canvas.
// FIXME: We should parse the path right here. It will provide a tighter bounding box for us to
- // give the opList, as well as enabling threaded parsing when using DDL.
+ // give the opsTask, as well as enabling threaded parsing when using DDL.
SkRect clippedDrawBounds;
if (!clippedDrawBounds.intersect(conservativeDevBounds, SkRect::Make(maskDevIBounds))) {
clippedDrawBounds.setEmpty();
@@ -122,9 +122,9 @@
}
GrCCDrawPathsOp::~GrCCDrawPathsOp() {
- if (fOwningPerOpListPaths) {
+ if (fOwningPerOpsTaskPaths) {
// Remove the list's dangling pointer to this Op before deleting it.
- fOwningPerOpListPaths->fDrawOps.remove(this);
+ fOwningPerOpsTaskPaths->fDrawOps.remove(this);
}
}
@@ -195,9 +195,10 @@
GrOp::CombineResult GrCCDrawPathsOp::onCombineIfPossible(GrOp* op, const GrCaps&) {
GrCCDrawPathsOp* that = op->cast<GrCCDrawPathsOp>();
- SkASSERT(fOwningPerOpListPaths);
+ SkASSERT(fOwningPerOpsTaskPaths);
SkASSERT(fNumDraws);
- SkASSERT(!that->fOwningPerOpListPaths || that->fOwningPerOpListPaths == fOwningPerOpListPaths);
+ SkASSERT(!that->fOwningPerOpsTaskPaths ||
+ that->fOwningPerOpsTaskPaths == fOwningPerOpsTaskPaths);
SkASSERT(that->fNumDraws);
if (fProcessors != that->fProcessors ||
@@ -205,18 +206,18 @@
return CombineResult::kCannotCombine;
}
- fDraws.append(std::move(that->fDraws), &fOwningPerOpListPaths->fAllocator);
+ fDraws.append(std::move(that->fDraws), &fOwningPerOpsTaskPaths->fAllocator);
SkDEBUGCODE(fNumDraws += that->fNumDraws);
SkDEBUGCODE(that->fNumDraws = 0);
return CombineResult::kMerged;
}
-void GrCCDrawPathsOp::addToOwningPerOpListPaths(sk_sp<GrCCPerOpListPaths> owningPerOpListPaths) {
+void GrCCDrawPathsOp::addToOwningPerOpsTaskPaths(sk_sp<GrCCPerOpsTaskPaths> owningPerOpsTaskPaths) {
SkASSERT(1 == fNumDraws);
- SkASSERT(!fOwningPerOpListPaths);
- fOwningPerOpListPaths = std::move(owningPerOpListPaths);
- fOwningPerOpListPaths->fDrawOps.addToTail(this);
+ SkASSERT(!fOwningPerOpsTaskPaths);
+ fOwningPerOpsTaskPaths = std::move(owningPerOpsTaskPaths);
+ fOwningPerOpsTaskPaths->fDrawOps.addToTail(this);
}
void GrCCDrawPathsOp::accountForOwnPaths(GrCCPathCache* pathCache,
@@ -414,9 +415,9 @@
}
void GrCCDrawPathsOp::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
- SkASSERT(fOwningPerOpListPaths);
+ SkASSERT(fOwningPerOpsTaskPaths);
- const GrCCPerFlushResources* resources = fOwningPerOpListPaths->fFlushResources.get();
+ const GrCCPerFlushResources* resources = fOwningPerOpsTaskPaths->fFlushResources.get();
if (!resources) {
return; // Setup failed.
}
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
index 1499c28..d0c4777 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -17,7 +17,7 @@
class GrCCAtlas;
class GrCCPerFlushResources;
struct GrCCPerFlushResourceSpecs;
-struct GrCCPerOpListPaths;
+struct GrCCPerOpsTaskPaths;
class GrOnFlushResourceProvider;
class GrRecordingContext;
@@ -46,7 +46,7 @@
}
void onPrepare(GrOpFlushState*) override {}
- void addToOwningPerOpListPaths(sk_sp<GrCCPerOpListPaths> owningPerOpListPaths);
+ void addToOwningPerOpsTaskPaths(sk_sp<GrCCPerOpsTaskPaths> owningPerOpsTaskPaths);
// Makes decisions about how to draw each path (cached, copied, rendered, etc.), and
// increments/fills out the corresponding GrCCPerFlushResourceSpecs.
@@ -125,9 +125,9 @@
friend class GrCCSTLList<SingleDraw>; // To access fNext.
};
- // Declare fOwningPerOpListPaths first, before fDraws. The draws use memory allocated by
- // fOwningPerOpListPaths, so it must not be unreffed until after fDraws is destroyed.
- sk_sp<GrCCPerOpListPaths> fOwningPerOpListPaths;
+ // Declare fOwningPerOpsTaskPaths first, before fDraws. The draws use memory allocated by
+ // fOwningPerOpsTaskPaths, so it must not be unreffed until after fDraws is destroyed.
+ sk_sp<GrCCPerOpsTaskPaths> fOwningPerOpsTaskPaths;
GrCCSTLList<SingleDraw> fDraws;
SkDEBUGCODE(int fNumDraws = 1);
diff --git a/src/gpu/ccpr/GrCCPerFlushResources.h b/src/gpu/ccpr/GrCCPerFlushResources.h
index 59122fe..f2504e6 100644
--- a/src/gpu/ccpr/GrCCPerFlushResources.h
+++ b/src/gpu/ccpr/GrCCPerFlushResources.h
@@ -62,8 +62,8 @@
/**
* This class wraps all the GPU resources that CCPR builds at flush time. It is allocated in CCPR's
- * preFlush() method, and referenced by all the GrCCPerOpListPaths objects that are being flushed.
- * It is deleted in postFlush() once all the flushing GrCCPerOpListPaths objects are deleted.
+ * preFlush() method, and referenced by all the GrCCPerOpsTaskPaths objects that are being flushed.
+ * It is deleted in postFlush() once all the flushing GrCCPerOpsTaskPaths objects are deleted.
*/
class GrCCPerFlushResources : public GrNonAtomicRef<GrCCPerFlushResources> {
public:
diff --git a/src/gpu/ccpr/GrCCPerOpListPaths.h b/src/gpu/ccpr/GrCCPerOpsTaskPaths.h
similarity index 78%
rename from src/gpu/ccpr/GrCCPerOpListPaths.h
rename to src/gpu/ccpr/GrCCPerOpsTaskPaths.h
index e0dd115..ff8a224 100644
--- a/src/gpu/ccpr/GrCCPerOpListPaths.h
+++ b/src/gpu/ccpr/GrCCPerOpsTaskPaths.h
@@ -5,8 +5,8 @@
* found in the LICENSE file.
*/
-#ifndef GrCCPerOpListPaths_DEFINED
-#define GrCCPerOpListPaths_DEFINED
+#ifndef GrCCPerOpsTaskPaths_DEFINED
+#define GrCCPerOpsTaskPaths_DEFINED
#include "include/core/SkRefCnt.h"
#include "src/core/SkArenaAlloc.h"
@@ -19,10 +19,10 @@
class GrCCPerFlushResources;
/**
- * Tracks all the CCPR paths in a given opList that will be drawn when it flushes.
+ * Tracks all the CCPR paths in a given opsTask that will be drawn when it flushes.
*/
// DDL TODO: given the usage pattern in DDL mode, this could probably be non-atomic refcounting.
-struct GrCCPerOpListPaths : public SkRefCnt {
+struct GrCCPerOpsTaskPaths : public SkRefCnt {
SkTInternalLList<GrCCDrawPathsOp> fDrawOps; // This class does not own these ops.
std::map<uint32_t, GrCCClipPath> fClipPaths;
SkSTArenaAlloc<10 * 1024> fAllocator{10 * 1024 * 2};
diff --git a/src/gpu/ccpr/GrCCSTLList.h b/src/gpu/ccpr/GrCCSTLList.h
index eb75863..29f26b9 100644
--- a/src/gpu/ccpr/GrCCSTLList.h
+++ b/src/gpu/ccpr/GrCCSTLList.h
@@ -14,8 +14,8 @@
/**
* A singly-linked list whose head element is a local class member. This is required by
- * GrCCDrawPathsOp because the owning opList is unknown at the time of creation, so we can't use its
- * associated allocator to create the first element.
+ * GrCCDrawPathsOp because the owning opsTask is unknown at the time of creation, so we can't use
+ * its associated allocator to create the first element.
*/
template<typename T> class GrCCSTLList : SkNoncopyable {
public:
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index a9e080b..4798b6f 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -71,11 +71,11 @@
}
}
-GrCCPerOpListPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opListID) {
- auto it = fPendingPaths.find(opListID);
+GrCCPerOpsTaskPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opsTaskID) {
+ auto it = fPendingPaths.find(opsTaskID);
if (fPendingPaths.end() == it) {
- sk_sp<GrCCPerOpListPaths> paths = sk_make_sp<GrCCPerOpListPaths>();
- it = fPendingPaths.insert(std::make_pair(opListID, std::move(paths))).first;
+ sk_sp<GrCCPerOpsTaskPaths> paths = sk_make_sp<GrCCPerOpsTaskPaths>();
+ it = fPendingPaths.insert(std::make_pair(opsTaskID, std::move(paths))).first;
}
return it->second.get();
}
@@ -182,16 +182,17 @@
void GrCoverageCountingPathRenderer::recordOp(std::unique_ptr<GrCCDrawPathsOp> op,
const DrawPathArgs& args) {
if (op) {
- auto addToOwningPerOpListPaths = [this](GrOp* op, uint32_t opListID) {
- op->cast<GrCCDrawPathsOp>()->addToOwningPerOpListPaths(
- sk_ref_sp(this->lookupPendingPaths(opListID)));
+ auto addToOwningPerOpsTaskPaths = [this](GrOp* op, uint32_t opsTaskID) {
+ op->cast<GrCCDrawPathsOp>()->addToOwningPerOpsTaskPaths(
+ sk_ref_sp(this->lookupPendingPaths(opsTaskID)));
};
- args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op), addToOwningPerOpListPaths);
+ args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op),
+ addToOwningPerOpsTaskPaths);
}
}
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
- uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ uint32_t opsTaskID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
const GrCaps& caps) {
SkASSERT(!fFlushing);
@@ -202,7 +203,7 @@
key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
}
GrCCClipPath& clipPath =
- this->lookupPendingPaths(opListID)->fClipPaths[key];
+ this->lookupPendingPaths(opsTaskID)->fClipPaths[key];
if (!clipPath.isInitialized()) {
// This ClipPath was just created during lookup. Initialize it.
const SkRect& pathDevBounds = deviceSpacePath.getBounds();
@@ -228,8 +229,8 @@
void GrCoverageCountingPathRenderer::preFlush(
GrOnFlushResourceProvider* onFlushRP,
- const uint32_t* opListIDs,
- int numOpListIDs,
+ const uint32_t* opsTaskIDs,
+ int numOpsTaskIDs,
SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) {
using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
SkASSERT(!fFlushing);
@@ -251,13 +252,13 @@
specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
specs.fRenderedAtlasSpecs.fMinTextureSize = SkTMin(512, maxPreferredRTSize);
- // Move the per-opList paths that are about to be flushed from fPendingPaths to fFlushingPaths,
+ // Move the per-opsTask paths that are about to be flushed from fPendingPaths to fFlushingPaths,
// and count them up so we can preallocate buffers.
- fFlushingPaths.reserve(numOpListIDs);
- for (int i = 0; i < numOpListIDs; ++i) {
- auto iter = fPendingPaths.find(opListIDs[i]);
+ fFlushingPaths.reserve(numOpsTaskIDs);
+ for (int i = 0; i < numOpsTaskIDs; ++i) {
+ auto iter = fPendingPaths.find(opsTaskIDs[i]);
if (fPendingPaths.end() == iter) {
- continue; // No paths on this opList.
+ continue; // No paths on this opsTask.
}
fFlushingPaths.push_back(std::move(iter->second));
@@ -318,8 +319,8 @@
}
}
-void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opListIDs,
- int numOpListIDs) {
+void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs,
+ int numOpsTaskIDs) {
SkASSERT(fFlushing);
if (!fFlushingPaths.empty()) {
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 0450918..4a7ca18 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -10,10 +10,10 @@
#include <map>
#include "src/gpu/GrOnFlushResourceProvider.h"
+#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrPathRenderer.h"
-#include "src/gpu/GrRenderTargetOpList.h"
#include "src/gpu/ccpr/GrCCPerFlushResources.h"
-#include "src/gpu/ccpr/GrCCPerOpListPaths.h"
+#include "src/gpu/ccpr/GrCCPerOpsTaskPaths.h"
class GrCCDrawPathsOp;
class GrCCPathCache;
@@ -41,16 +41,16 @@
CoverageType coverageType() const { return fCoverageType; }
- using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
+ using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpsTaskPaths>>;
- // In DDL mode, Ganesh needs to be able to move the pending GrCCPerOpListPaths to the DDL object
- // (detachPendingPaths) and then return them upon replay (mergePendingPaths).
+ // In DDL mode, Ganesh needs to be able to move the pending GrCCPerOpsTaskPaths to the DDL
+ // object (detachPendingPaths) and then return them upon replay (mergePendingPaths).
PendingPathsMap detachPendingPaths() { return std::move(fPendingPaths); }
void mergePendingPaths(const PendingPathsMap& paths) {
#ifdef SK_DEBUG
- // Ensure there are no duplicate opList IDs between the incoming path map and ours.
- // This should always be true since opList IDs are globally unique and these are coming
+ // Ensure there are no duplicate opsTask IDs between the incoming path map and ours.
+ // This should always be true since opsTask IDs are globally unique and these are coming
// from different DDL recordings.
for (const auto& it : paths) {
SkASSERT(!fPendingPaths.count(it.first));
@@ -65,9 +65,9 @@
const GrCaps&);
// GrOnFlushCallbackObject overrides.
- void preFlush(GrOnFlushResourceProvider*, const uint32_t* opListIDs, int numOpListIDs,
+ void preFlush(GrOnFlushResourceProvider*, const uint32_t* opsTaskIDs, int numOpsTaskIDs,
SkTArray<std::unique_ptr<GrRenderTargetContext>>* out) override;
- void postFlush(GrDeferredUploadToken, const uint32_t* opListIDs, int numOpListIDs) override;
+ void postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs, int numOpsTaskIDs) override;
void purgeCacheEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point&);
@@ -94,19 +94,19 @@
CanDrawPath onCanDrawPath(const CanDrawPathArgs&) const override;
bool onDrawPath(const DrawPathArgs&) override;
- GrCCPerOpListPaths* lookupPendingPaths(uint32_t opListID);
+ GrCCPerOpsTaskPaths* lookupPendingPaths(uint32_t opsTaskID);
void recordOp(std::unique_ptr<GrCCDrawPathsOp>, const DrawPathArgs&);
const CoverageType fCoverageType;
- // fPendingPaths holds the GrCCPerOpListPaths objects that have already been created, but not
- // flushed, and those that are still being created. All GrCCPerOpListPaths objects will first
+ // fPendingPaths holds the GrCCPerOpsTaskPaths objects that have already been created, but not
+ // flushed, and those that are still being created. All GrCCPerOpsTaskPaths objects will first
// reside in fPendingPaths, then be moved to fFlushingPaths during preFlush().
PendingPathsMap fPendingPaths;
- // fFlushingPaths holds the GrCCPerOpListPaths objects that are currently being flushed.
+ // fFlushingPaths holds the GrCCPerOpsTaskPaths objects that are currently being flushed.
// (It will only contain elements when fFlushing is true.)
- SkSTArray<4, sk_sp<GrCCPerOpListPaths>> fFlushingPaths;
+ SkSTArray<4, sk_sp<GrCCPerOpsTaskPaths>> fFlushingPaths;
std::unique_ptr<GrCCPathCache> fPathCache;
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
index 0b61e4b..1b2a55e 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
@@ -17,7 +17,7 @@
}
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
- uint32_t opListID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
+ uint32_t opsTaskID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
const GrCaps& caps) {
return nullptr;
}