Elevate the target list of Ganesh tasks to base class
A follow-up CL will use the availability of this information — the full
list of targets of a GrRenderTask — to enable faster storage of the
lastRenderTask association for a given surface proxy in a given drawing
manager.
Bug: skia:10320
Change-Id: I3eb3276b483a7f09481774896a024172b73a4c84
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/296729
Reviewed-by: Robert Phillips <robertphillips@google.com>
Commit-Queue: Adlai Holler <adlai@google.com>
diff --git a/src/gpu/GrCopyRenderTask.cpp b/src/gpu/GrCopyRenderTask.cpp
index 46b102c..a578451 100644
--- a/src/gpu/GrCopyRenderTask.cpp
+++ b/src/gpu/GrCopyRenderTask.cpp
@@ -51,26 +51,26 @@
const SkIRect& srcRect,
GrSurfaceProxyView dstView,
const SkIPoint& dstPoint)
- : GrRenderTask(std::move(dstView))
+ : GrRenderTask()
, fSrcView(std::move(srcView))
, fSrcRect(srcRect)
, fDstPoint(dstPoint) {
- drawingMgr->setLastRenderTask(fTargetView.proxy(), this);
+ this->addTarget(drawingMgr, dstView);
}
void GrCopyRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
// This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
// fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
- // we read fSrcView and copy to fTargetView.
+ // we read fSrcView and copy to the target view.
alloc->addInterval(fSrcView.proxy(), alloc->curOp(), alloc->curOp(),
GrResourceAllocator::ActualUse::kYes);
- alloc->addInterval(fTargetView.proxy(), alloc->curOp(), alloc->curOp(),
+ alloc->addInterval(this->target(0).proxy(), alloc->curOp(), alloc->curOp(),
GrResourceAllocator::ActualUse::kYes);
alloc->incOps();
}
bool GrCopyRenderTask::onExecute(GrOpFlushState* flushState) {
- GrSurfaceProxy* dstProxy = fTargetView.proxy();
+ GrSurfaceProxy* dstProxy = this->target(0).proxy();
GrSurfaceProxy* srcProxy = fSrcView.proxy();
if (!srcProxy->isInstantiated() || !dstProxy->isInstantiated()) {
return false;
diff --git a/src/gpu/GrCopyRenderTask.h b/src/gpu/GrCopyRenderTask.h
index 89d5782..4ecd3c7 100644
--- a/src/gpu/GrCopyRenderTask.h
+++ b/src/gpu/GrCopyRenderTask.h
@@ -27,8 +27,6 @@
const SkIPoint& dstPoint);
bool onIsUsed(GrSurfaceProxy* proxy) const override {
- // This case should be handled by GrRenderTask.
- SkASSERT(proxy != fTargetView.proxy());
return proxy == fSrcView.proxy();
}
// If instantiation failed, at flush time we simply will skip doing the copy.
diff --git a/src/gpu/GrDrawingManager.cpp b/src/gpu/GrDrawingManager.cpp
index 5061780..5ae2684 100644
--- a/src/gpu/GrDrawingManager.cpp
+++ b/src/gpu/GrDrawingManager.cpp
@@ -65,8 +65,8 @@
}
bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const {
- for (int i = 0; i < fRenderTasks.count(); ++i) {
- if (fRenderTasks[i] && fRenderTasks[i]->isUsed(proxy)) {
+ for (const auto& task : fRenderTasks) {
+ if (task && task->isUsed(proxy)) {
return true;
}
}
@@ -123,7 +123,7 @@
GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask();
if (prevOpsTask && curOpsTask) {
- SkASSERT(prevOpsTask->fTargetView != curOpsTask->fTargetView);
+ SkASSERT(prevOpsTask->target(0).proxy() != curOpsTask->target(0).proxy());
}
prevOpsTask = curOpsTask;
@@ -836,7 +836,7 @@
}
fDAG.add(waitTask);
} else {
- if (fActiveOpsTask && (fActiveOpsTask->fTargetView.proxy() == proxy.get())) {
+ if (fActiveOpsTask && (fActiveOpsTask->target(0).proxy() == proxy.get())) {
SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
fDAG.addBeforeLast(waitTask);
// In this case we keep the current renderTask open but just insert the new waitTask
diff --git a/src/gpu/GrOpsTask.cpp b/src/gpu/GrOpsTask.cpp
index ec2c6c9..1f0a55f 100644
--- a/src/gpu/GrOpsTask.cpp
+++ b/src/gpu/GrOpsTask.cpp
@@ -360,11 +360,11 @@
GrOpsTask::GrOpsTask(GrDrawingManager* drawingMgr, GrRecordingContext::Arenas arenas,
GrSurfaceProxyView view,
GrAuditTrail* auditTrail)
- : GrRenderTask(std::move(view))
+ : GrRenderTask()
, fArenas(arenas)
, fAuditTrail(auditTrail)
SkDEBUGCODE(, fNumClips(0)) {
- drawingMgr->setLastRenderTask(fTargetView.proxy(), this);
+ this->addTarget(drawingMgr, std::move(view));
}
void GrOpsTask::deleteOps() {
@@ -393,12 +393,6 @@
this->deleteOps();
fClipAllocator.reset();
- GrSurfaceProxy* proxy = fTargetView.proxy();
- if (proxy && this == drawingMgr->getLastRenderTask(proxy)) {
- drawingMgr->setLastRenderTask(proxy, nullptr);
- }
-
- fTargetView.reset();
fDeferredProxies.reset();
fSampledProxies.reset();
fAuditTrail = nullptr;
@@ -422,7 +416,7 @@
for (const auto& chain : fOpChains) {
if (chain.shouldExecute()) {
chain.head()->prePrepare(context,
- &fTargetView,
+ &fTargets[0],
chain.appliedClip(),
chain.dstProxyView());
}
@@ -430,7 +424,7 @@
}
void GrOpsTask::onPrepare(GrOpFlushState* flushState) {
- SkASSERT(fTargetView.proxy()->peekRenderTarget());
+ SkASSERT(this->target(0).proxy()->peekRenderTarget());
SkASSERT(this->isClosed());
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
@@ -451,7 +445,7 @@
TRACE_EVENT0("skia.gpu", chain.head()->name());
#endif
GrOpFlushState::OpArgs opArgs(chain.head(),
- &fTargetView,
+ &fTargets[0],
chain.appliedClip(),
chain.dstProxyView());
@@ -459,7 +453,7 @@
// Temporary debugging helper: for debugging prePrepare w/o going through DDLs
// Delete once most of the GrOps have an onPrePrepare.
- // chain.head()->prePrepare(flushState->gpu()->getContext(), &fTargetView,
+ // chain.head()->prePrepare(flushState->gpu()->getContext(), &this->target(0),
// chain.appliedClip());
// GrOp::prePrepare may or may not have been called at this point
@@ -507,8 +501,8 @@
return false;
}
- SkASSERT(fTargetView.proxy());
- GrRenderTargetProxy* proxy = fTargetView.proxy()->asRenderTargetProxy();
+ SkASSERT(this->numTargets() == 1);
+ GrRenderTargetProxy* proxy = this->target(0).proxy()->asRenderTargetProxy();
SkASSERT(proxy);
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
@@ -575,7 +569,7 @@
: GrStoreOp::kStore;
GrOpsRenderPass* renderPass = create_render_pass(
- flushState->gpu(), proxy->peekRenderTarget(), stencil, fTargetView.origin(),
+ flushState->gpu(), proxy->peekRenderTarget(), stencil, this->target(0).origin(),
fClippedContentBounds, fColorLoadOp, fLoadClearColor, stencilLoadOp, stencilStoreOp,
fSampledProxies);
if (!renderPass) {
@@ -594,7 +588,7 @@
#endif
GrOpFlushState::OpArgs opArgs(chain.head(),
- &fTargetView,
+ &fTargets[0],
chain.appliedClip(),
chain.dstProxyView());
@@ -614,7 +608,7 @@
fColorLoadOp = op;
fLoadClearColor = color;
if (GrLoadOp::kClear == fColorLoadOp) {
- GrSurfaceProxy* proxy = fTargetView.proxy();
+ GrSurfaceProxy* proxy = this->target(0).proxy();
SkASSERT(proxy);
fTotalBounds = proxy->backingStoreBoundsRect();
}
@@ -635,7 +629,7 @@
// If the opsTask is using a render target which wraps a vulkan command buffer, we can't do
// a clear load since we cannot change the render pass that we are using. Thus we fall back
// to making a clear op in this case.
- return !fTargetView.asRenderTargetProxy()->wrapsVkSecondaryCB();
+ return !this->target(0).asRenderTargetProxy()->wrapsVkSecondaryCB();
}
// Could not empty the task, so an op must be added to handle the clear
@@ -761,7 +755,7 @@
alloc->addInterval(fDeferredProxies[i], 0, 0, GrResourceAllocator::ActualUse::kNo);
}
- GrSurfaceProxy* targetProxy = fTargetView.proxy();
+ GrSurfaceProxy* targetProxy = this->target(0).proxy();
// Add the interval for all the writes to this GrOpsTasks's target
if (fOpChains.count()) {
@@ -780,7 +774,7 @@
auto gather = [ alloc SkDEBUGCODE(, this) ] (GrSurfaceProxy* p, GrMipMapped) {
alloc->addInterval(p, alloc->curOp(), alloc->curOp(), GrResourceAllocator::ActualUse::kYes
- SkDEBUGCODE(, fTargetView.proxy() == p));
+ SkDEBUGCODE(, this->target(0).proxy() == p));
};
for (const OpChain& recordedOp : fOpChains) {
recordedOp.visitProxies(gather);
@@ -796,7 +790,7 @@
const DstProxyView* dstProxyView, const GrCaps& caps) {
SkDEBUGCODE(op->validate();)
SkASSERT(processorAnalysis.requiresDstTexture() == (dstProxyView && dstProxyView->proxy()));
- GrSurfaceProxy* proxy = fTargetView.proxy();
+ GrSurfaceProxy* proxy = this->target(0).proxy();
SkASSERT(proxy);
// A closed GrOpsTask should never receive new/more ops
@@ -896,11 +890,11 @@
fClosedObservers.reset();
});
if (!this->isNoOp()) {
- GrSurfaceProxy* proxy = fTargetView.proxy();
+ GrSurfaceProxy* proxy = this->target(0).proxy();
// Use the entire backing store bounds since the GPU doesn't clip automatically to the
// logical dimensions.
SkRect clippedContentBounds = proxy->backingStoreBoundsRect();
- // TODO: If we can fix up GLPrograms test to always intersect the fTargetView proxy bounds
+ // TODO: If we can fix up GLPrograms test to always intersect the target proxy bounds
// then we can simply assert here that the bounds intersect.
if (clippedContentBounds.intersect(fTotalBounds)) {
clippedContentBounds.roundOut(&fClippedContentBounds);
diff --git a/src/gpu/GrRenderTask.cpp b/src/gpu/GrRenderTask.cpp
index 1cf2efc..fece63f 100644
--- a/src/gpu/GrRenderTask.cpp
+++ b/src/gpu/GrRenderTask.cpp
@@ -26,29 +26,24 @@
, fFlags(0) {
}
-GrRenderTask::GrRenderTask(GrSurfaceProxyView targetView)
- : fTargetView(std::move(targetView))
- , fUniqueID(CreateUniqueID())
- , fFlags(0) {
-}
-
void GrRenderTask::disown(GrDrawingManager* drawingMgr) {
if (this->isSetFlag(kDisowned_Flag)) {
return;
}
this->setFlag(kDisowned_Flag);
- GrSurfaceProxy* proxy = fTargetView.proxy();
- if (proxy && this == drawingMgr->getLastRenderTask(proxy)) {
- // Ensure the drawing manager doesn't hold a dangling pointer.
- drawingMgr->setLastRenderTask(proxy, nullptr);
+
+ for (const GrSurfaceProxyView& target : fTargets) {
+ if (this == drawingMgr->getLastRenderTask(target.proxy())) {
+ drawingMgr->setLastRenderTask(target.proxy(), nullptr);
+ }
}
}
+#ifdef SK_DEBUG
GrRenderTask::~GrRenderTask() {
SkASSERT(this->isSetFlag(kDisowned_Flag));
}
-#ifdef SK_DEBUG
bool GrRenderTask::deferredProxiesAreInstantiated() const {
for (int i = 0; i < fDeferredProxies.count(); ++i) {
if (!fDeferredProxies[i]->isInstantiated()) {
@@ -67,13 +62,13 @@
SkIRect targetUpdateBounds;
if (ExpectedOutcome::kTargetDirty == this->onMakeClosed(caps, &targetUpdateBounds)) {
- GrSurfaceProxy* proxy = fTargetView.proxy();
+ GrSurfaceProxy* proxy = this->target(0).proxy();
if (proxy->requiresManualMSAAResolve()) {
- SkASSERT(fTargetView.asRenderTargetProxy());
- fTargetView.asRenderTargetProxy()->markMSAADirty(targetUpdateBounds,
- fTargetView.origin());
+ SkASSERT(this->target(0).asRenderTargetProxy());
+ this->target(0).asRenderTargetProxy()->markMSAADirty(targetUpdateBounds,
+ this->target(0).origin());
}
- GrTextureProxy* textureProxy = fTargetView.asTextureProxy();
+ GrTextureProxy* textureProxy = this->target(0).asTextureProxy();
if (textureProxy && GrMipMapped::kYes == textureProxy->mipMapped()) {
textureProxy->markMipMapsDirty();
}
@@ -259,11 +254,11 @@
}
bool GrRenderTask::isInstantiated() const {
- // Some renderTasks (e.g. GrTransferFromRenderTask) don't have a target.
- GrSurfaceProxy* proxy = fTargetView.proxy();
- if (!proxy) {
+ // Some renderTasks (e.g. GrTransferFromRenderTask) don't have any targets.
+ if (0 == this->numTargets()) {
return true;
}
+ GrSurfaceProxy* proxy = this->target(0).proxy();
if (!proxy->isInstantiated()) {
return false;
@@ -277,16 +272,29 @@
return true;
}
+void GrRenderTask::addTarget(GrDrawingManager* drawingMgr, GrSurfaceProxyView view) {
+ SkASSERT(view);
+ drawingMgr->setLastRenderTask(view.proxy(), this);
+ fTargets.push_back(std::move(view));
+}
+
#ifdef SK_DEBUG
void GrRenderTask::dump(bool printDependencies) const {
SkDebugf("--------------------------------------------------------------\n");
- GrSurfaceProxy* proxy = fTargetView.proxy();
- SkDebugf("%s - renderTaskID: %d - proxyID: %d - surfaceID: %d\n",
- this->name(), fUniqueID,
- proxy ? proxy->uniqueID().asUInt() : -1,
- proxy && proxy->peekSurface()
- ? proxy->peekSurface()->uniqueID().asUInt()
- : -1);
+ SkDebugf("%s - renderTaskID: %d\n", this->name(), fUniqueID);
+
+ if (!fTargets.empty()) {
+ SkDebugf("Targets: \n");
+ for (int i = 0; i < fTargets.count(); ++i) {
+ GrSurfaceProxy* proxy = fTargets[i].proxy();
+ SkDebugf("[%d]: proxyID: %d - surfaceID: %d\n",
+ i,
+ proxy ? proxy->uniqueID().asUInt() : -1,
+ proxy && proxy->peekSurface()
+ ? proxy->peekSurface()->uniqueID().asUInt()
+ : -1);
+ }
+ }
if (printDependencies) {
SkDebugf("I rely On (%d): ", fDependencies.count());
diff --git a/src/gpu/GrRenderTask.h b/src/gpu/GrRenderTask.h
index ac55f9a..64431a1 100644
--- a/src/gpu/GrRenderTask.h
+++ b/src/gpu/GrRenderTask.h
@@ -27,8 +27,7 @@
class GrRenderTask : public SkRefCnt {
public:
GrRenderTask();
- GrRenderTask(GrSurfaceProxyView);
- ~GrRenderTask() override;
+ SkDEBUGCODE(~GrRenderTask() override);
void makeClosed(const GrCaps&);
@@ -68,7 +67,8 @@
bool dependsOn(const GrRenderTask* dependedOn) const;
uint32_t uniqueID() const { return fUniqueID; }
- GrSurfaceProxyView targetView() const { return fTargetView; }
+ int numTargets() const { return fTargets.count(); }
+ const GrSurfaceProxyView& target(int i) const { return fTargets[i]; }
/*
* Safely cast this GrRenderTask to a GrOpsTask (if possible).
@@ -88,8 +88,8 @@
void visitTargetAndSrcProxies_debugOnly(const GrOp::VisitProxyFunc& fn) const {
this->visitProxies_debugOnly(fn);
- if (fTargetView.proxy()) {
- fn(fTargetView.proxy(), GrMipMapped::kNo);
+ for (int i = 0; i < this->numTargets(); ++i) {
+ fn(this->target(i).proxy(), GrMipMapped::kNo);
}
}
#endif
@@ -101,6 +101,10 @@
SkDEBUGCODE(bool deferredProxiesAreInstantiated() const;)
+ // Add a target surface proxy to the list of targets for this task.
+ // This also informs the drawing manager to update the lastRenderTask association.
+ void addTarget(GrDrawingManager*, GrSurfaceProxyView);
+
enum class ExpectedOutcome : bool {
kTargetUnchanged,
kTargetDirty,
@@ -113,7 +117,7 @@
// targetUpdateBounds must not extend beyond the proxy bounds.
virtual ExpectedOutcome onMakeClosed(const GrCaps&, SkIRect* targetUpdateBounds) = 0;
- GrSurfaceProxyView fTargetView;
+ SkSTArray<1, GrSurfaceProxyView> fTargets;
// List of texture proxies whose contents are being prepared on a worker thread
// TODO: this list exists so we can fire off the proper upload when an renderTask begins
@@ -125,15 +129,19 @@
friend class GrDrawingManager;
// Drops any pending operations that reference proxies that are not instantiated.
- // NOTE: Derived classes don't need to check fTargetView. That is handled when the
+ // NOTE: Derived classes don't need to check targets. That is handled when the
// drawingManager calls isInstantiated.
virtual void handleInternalAllocationFailure() = 0;
+ // Derived classes can override to indicate usage of proxies _other than target proxies_.
+ // GrRenderTask itself will handle checking the target proxies.
virtual bool onIsUsed(GrSurfaceProxy*) const = 0;
bool isUsed(GrSurfaceProxy* proxy) const {
- if (proxy == fTargetView.proxy()) {
- return true;
+ for (const GrSurfaceProxyView& target : fTargets) {
+ if (target.proxy() == proxy) {
+ return true;
+ }
}
return this->onIsUsed(proxy);
diff --git a/src/gpu/GrTextureResolveRenderTask.cpp b/src/gpu/GrTextureResolveRenderTask.cpp
index 79e1531..1a915ba 100644
--- a/src/gpu/GrTextureResolveRenderTask.cpp
+++ b/src/gpu/GrTextureResolveRenderTask.cpp
@@ -14,19 +14,12 @@
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrTexturePriv.h"
-void GrTextureResolveRenderTask::disown(GrDrawingManager* drawingMgr) {
- for (const auto& resolve : fResolves) {
- drawingMgr->setLastRenderTask(resolve.fProxy.get(), nullptr);
- }
- GrRenderTask::disown(drawingMgr);
-}
-
void GrTextureResolveRenderTask::addProxy(GrDrawingManager* drawingMgr,
sk_sp<GrSurfaceProxy> proxyRef,
GrSurfaceProxy::ResolveFlags flags,
const GrCaps& caps) {
- fResolves.emplace_back(std::move(proxyRef), flags);
- GrSurfaceProxy* proxy = fResolves.back().fProxy.get();
+ Resolve& resolve = fResolves.emplace_back(flags);
+ GrSurfaceProxy* proxy = proxyRef.get();
// Ensure the last render task that operated on the proxy is closed. That's where msaa and
// mipmaps should have been marked dirty.
@@ -38,7 +31,7 @@
GrRenderTargetProxy* renderTargetProxy = proxy->asRenderTargetProxy();
SkASSERT(renderTargetProxy);
SkASSERT(renderTargetProxy->isMSAADirty());
- fResolves.back().fMSAAResolveRect = renderTargetProxy->msaaDirtyRect();
+ resolve.fMSAAResolveRect = renderTargetProxy->msaaDirtyRect();
renderTargetProxy->markMSAAResolved();
}
@@ -53,7 +46,7 @@
// generating mipmap levels and/or resolving MSAA.
this->addDependency(drawingMgr, proxy, GrMipMapped::kNo,
GrTextureResolveManager(nullptr), caps);
- drawingMgr->setLastRenderTask(proxy, this);
+ this->addTarget(drawingMgr, GrSurfaceProxyView(std::move(proxyRef)));
}
void GrTextureResolveRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
@@ -61,8 +54,9 @@
// fEndOfOpsTaskOpIndices will remain in sync. We create fake op#'s to capture the fact that we
// manipulate the resolve proxies.
auto fakeOp = alloc->curOp();
- for (const auto& resolve : fResolves) {
- alloc->addInterval(resolve.fProxy.get(), fakeOp, fakeOp,
+ SkASSERT(fResolves.count() == this->numTargets());
+ for (const GrSurfaceProxyView& target : fTargets) {
+ alloc->addInterval(target.proxy(), fakeOp, fakeOp,
GrResourceAllocator::ActualUse::kYes);
}
alloc->incOps();
@@ -70,9 +64,11 @@
bool GrTextureResolveRenderTask::onExecute(GrOpFlushState* flushState) {
// Resolve all msaa back-to-back, before regenerating mipmaps.
- for (const auto& resolve : fResolves) {
+ SkASSERT(fResolves.count() == this->numTargets());
+ for (int i = 0; i < fResolves.count(); ++i) {
+ const Resolve& resolve = fResolves[i];
if (GrSurfaceProxy::ResolveFlags::kMSAA & resolve.fFlags) {
- GrSurfaceProxy* proxy = resolve.fProxy.get();
+ GrSurfaceProxy* proxy = this->target(i).proxy();
// peekRenderTarget might be null if there was an instantiation error.
if (GrRenderTarget* renderTarget = proxy->peekRenderTarget()) {
flushState->gpu()->resolveRenderTarget(renderTarget, resolve.fMSAAResolveRect,
@@ -81,10 +77,11 @@
}
}
// Regenerate all mipmaps back-to-back.
- for (const auto& resolve : fResolves) {
+ for (int i = 0; i < fResolves.count(); ++i) {
+ const Resolve& resolve = fResolves[i];
if (GrSurfaceProxy::ResolveFlags::kMipMaps & resolve.fFlags) {
// peekTexture might be null if there was an instantiation error.
- GrTexture* texture = resolve.fProxy->peekTexture();
+ GrTexture* texture = this->target(i).proxy()->peekTexture();
if (texture && texture->texturePriv().mipMapsAreDirty()) {
flushState->gpu()->regenerateMipMapLevels(texture);
SkASSERT(!texture->texturePriv().mipMapsAreDirty());
@@ -96,9 +93,5 @@
}
#ifdef SK_DEBUG
-void GrTextureResolveRenderTask::visitProxies_debugOnly(const GrOp::VisitProxyFunc& fn) const {
- for (const auto& resolve : fResolves) {
- fn(resolve.fProxy.get(), GrMipMapped::kNo);
- }
-}
+void GrTextureResolveRenderTask::visitProxies_debugOnly(const GrOp::VisitProxyFunc& fn) const {}
#endif
diff --git a/src/gpu/GrTextureResolveRenderTask.h b/src/gpu/GrTextureResolveRenderTask.h
index e7f55c9..379aa4b 100644
--- a/src/gpu/GrTextureResolveRenderTask.h
+++ b/src/gpu/GrTextureResolveRenderTask.h
@@ -14,15 +14,11 @@
public:
GrTextureResolveRenderTask() : GrRenderTask() {}
- void disown(GrDrawingManager*) override;
-
void addProxy(GrDrawingManager*, sk_sp<GrSurfaceProxy> proxy,
GrSurfaceProxy::ResolveFlags, const GrCaps&);
private:
bool onIsUsed(GrSurfaceProxy* proxy) const override {
- // This case should be handled by GrRenderTask.
- SkASSERT(proxy != fTargetView.proxy());
return false;
}
void handleInternalAllocationFailure() override {
@@ -42,9 +38,7 @@
#endif
struct Resolve {
- Resolve(sk_sp<GrSurfaceProxy> proxy, GrSurfaceProxy::ResolveFlags flags)
- : fProxy(std::move(proxy)), fFlags(flags) {}
- sk_sp<GrSurfaceProxy> fProxy;
+ Resolve(GrSurfaceProxy::ResolveFlags flags) : fFlags(flags) {}
GrSurfaceProxy::ResolveFlags fFlags;
SkIRect fMSAAResolveRect;
};
diff --git a/src/gpu/GrTransferFromRenderTask.h b/src/gpu/GrTransferFromRenderTask.h
index 2126ae8..93a0f7a 100644
--- a/src/gpu/GrTransferFromRenderTask.h
+++ b/src/gpu/GrTransferFromRenderTask.h
@@ -28,7 +28,7 @@
private:
bool onIsUsed(GrSurfaceProxy* proxy) const override {
- SkASSERT(!fTargetView.proxy());
+ SkASSERT(0 == this->numTargets());
return proxy == fSrcProxy.get();
}
// If fSrcProxy is uninstantiated at flush time we simply will skip doing the transfer.
diff --git a/src/gpu/GrWaitRenderTask.cpp b/src/gpu/GrWaitRenderTask.cpp
index d7b2a7c..4b3855b 100644
--- a/src/gpu/GrWaitRenderTask.cpp
+++ b/src/gpu/GrWaitRenderTask.cpp
@@ -14,8 +14,10 @@
void GrWaitRenderTask::gatherProxyIntervals(GrResourceAllocator* alloc) const {
// This renderTask doesn't have "normal" ops. In this case we still need to add an interval (so
// fEndOfOpsTaskOpIndices will remain in sync), so we create a fake op# to capture the fact that
- // we manipulate fTargetView's proxy.
- alloc->addInterval(fTargetView.proxy(), alloc->curOp(), alloc->curOp(),
+ // we manipulate the waited-on proxy (fWaitedOn); this task deliberately has no targets.
+ SkASSERT(0 == this->numTargets());
+ auto fakeOp = alloc->curOp();
+ alloc->addInterval(fWaitedOn.proxy(), fakeOp, fakeOp,
GrResourceAllocator::ActualUse::kYes);
alloc->incOps();
}
diff --git a/src/gpu/GrWaitRenderTask.h b/src/gpu/GrWaitRenderTask.h
index 18e7157..4681f76 100644
--- a/src/gpu/GrWaitRenderTask.h
+++ b/src/gpu/GrWaitRenderTask.h
@@ -16,15 +16,14 @@
GrWaitRenderTask(GrSurfaceProxyView surfaceView,
std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
int numSemaphores)
- : GrRenderTask(std::move(surfaceView))
+ : GrRenderTask()
, fSemaphores(std::move(semaphores))
- , fNumSemaphores(numSemaphores) {}
+ , fNumSemaphores(numSemaphores)
+ , fWaitedOn(std::move(surfaceView)) {}
private:
bool onIsUsed(GrSurfaceProxy* proxy) const override {
- // This case should be handled by GrRenderTask.
- SkASSERT(proxy != fTargetView.proxy());
- return false;
+ return proxy == fWaitedOn.proxy();
}
void handleInternalAllocationFailure() override {}
void gatherProxyIntervals(GrResourceAllocator*) const override;
@@ -42,6 +41,11 @@
#endif
std::unique_ptr<std::unique_ptr<GrSemaphore>[]> fSemaphores;
int fNumSemaphores;
+
+ // This field is separate from the main "targets" field on GrRenderTask because this task
+ // does not actually write to the surface and so should not participate in the normal
+ // lastRenderTask tracking that written-to targets do.
+ GrSurfaceProxyView fWaitedOn;
};
#endif