Mark mipmaps dirty from makeClosed() rather than opList constructors
Bug: skia:
Change-Id: Ia90877399777134e735ac6328fff629748ef49d3
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/235158
Commit-Queue: Chris Dalton <csmartdalton@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
index e80442e..2035dd5 100644
--- a/src/gpu/GrOpList.cpp
+++ b/src/gpu/GrOpList.cpp
@@ -29,17 +29,3 @@
fDeferredProxies.reset();
fAuditTrail = nullptr;
}
-
-#ifdef SK_DEBUG
-static const char* op_to_name(GrLoadOp op) {
- return GrLoadOp::kLoad == op ? "load" : GrLoadOp::kClear == op ? "clear" : "discard";
-}
-
-void GrOpList::dump(bool printDependencies) const {
- GrRenderTask::dump(printDependencies);
- SkDebugf("ColorLoadOp: %s %x StencilLoadOp: %s\n",
- op_to_name(fColorLoadOp),
- GrLoadOp::kClear == fColorLoadOp ? fLoadClearColor.toBytes_RGBA() : 0x0,
- op_to_name(fStencilLoadOp));
-}
-#endif
diff --git a/src/gpu/GrOpList.h b/src/gpu/GrOpList.h
index 4de0a21..f77e0a2 100644
--- a/src/gpu/GrOpList.h
+++ b/src/gpu/GrOpList.h
@@ -39,21 +39,12 @@
void endFlush() override;
- /*
- * Dump out the GrOpList dependency DAG
- */
- SkDEBUGCODE(void dump(bool printDependencies) const override;)
-
protected:
// This is a backpointer to the GrOpMemoryPool that holds the memory for this opLists' ops.
// In the DDL case, these back pointers keep the DDL's GrOpMemoryPool alive as long as its
// constituent opLists survive.
sk_sp<GrOpMemoryPool> fOpMemoryPool;
GrAuditTrail* fAuditTrail;
-
- GrLoadOp fColorLoadOp = GrLoadOp::kLoad;
- SkPMColor4f fLoadClearColor = SK_PMColor4fTRANSPARENT;
- GrLoadOp fStencilLoadOp = GrLoadOp::kLoad;
};
#endif
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index 932a818..ca39aa0 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -355,11 +355,6 @@
: INHERITED(std::move(opMemoryPool), std::move(proxy), auditTrail)
, fLastClipStackGenID(SK_InvalidUniqueID)
SkDEBUGCODE(, fNumClips(0)) {
- if (GrTextureProxy* textureProxy = fTarget->asTextureProxy()) {
- if (GrMipMapped::kYes == textureProxy->mipMapped()) {
- textureProxy->markMipMapsDirty();
- }
- }
fTarget->setLastRenderTask(this);
}
@@ -377,9 +372,18 @@
////////////////////////////////////////////////////////////////////////////////
#ifdef SK_DEBUG
+static const char* load_op_to_name(GrLoadOp op) {
+ return GrLoadOp::kLoad == op ? "load" : GrLoadOp::kClear == op ? "clear" : "discard";
+}
+
void GrRenderTargetOpList::dump(bool printDependencies) const {
INHERITED::dump(printDependencies);
+ SkDebugf("ColorLoadOp: %s %x StencilLoadOp: %s\n",
+ load_op_to_name(fColorLoadOp),
+ GrLoadOp::kClear == fColorLoadOp ? fLoadClearColor.toBytes_RGBA() : 0x0,
+ load_op_to_name(fStencilLoadOp));
+
SkDebugf("ops (%d):\n", fOpChains.count());
for (int i = 0; i < fOpChains.count(); ++i) {
SkDebugf("*******************************\n");
@@ -466,23 +470,7 @@
// is at flush time). However, we need to store the RenderTargetProxy in the
// Ops and instantiate them here.
bool GrRenderTargetOpList::onExecute(GrOpFlushState* flushState) {
- // TODO: Forcing the execution of the discard here isn't ideal since it will cause us to do a
- // discard and then store the data back in memory so that the load op on future draws doesn't
- // think the memory is unitialized. Ideally we would want a system where we are tracking whether
- // the proxy itself has valid data or not, and then use that as a signal on whether we should be
- // loading or discarding. In that world we wouldni;t need to worry about executing oplists with
- // no ops just to do a discard.
- if (fOpChains.empty() && GrLoadOp::kClear != fColorLoadOp &&
- GrLoadOp::kDiscard != fColorLoadOp) {
- // TEMPORARY: We are in the process of moving GrMipMapsStatus from the texture to the proxy.
- // During this time we want to assert that the proxy resolves mipmaps at the exact same
- // times the old code would have. A null opList is very exceptional, and the proxy will have
- // assumed mipmaps are dirty in this scenario. We mark them dirty here on the texture as
- // well, in order to keep the assert passing.
- GrTexture* tex = fTarget->peekTexture();
- if (tex && GrMipMapped::kYes == tex->texturePriv().mipMapped()) {
- tex->texturePriv().markMipMapsDirty();
- }
+ if (this->isNoOp()) {
return false;
}
diff --git a/src/gpu/GrRenderTargetOpList.h b/src/gpu/GrRenderTargetOpList.h
index a8119bb..efacbd0 100644
--- a/src/gpu/GrRenderTargetOpList.h
+++ b/src/gpu/GrRenderTargetOpList.h
@@ -37,16 +37,6 @@
~GrRenderTargetOpList() override;
- void makeClosed(const GrCaps& caps) override {
- if (this->isClosed()) {
- return;
- }
-
- this->forwardCombine(caps);
-
- INHERITED::makeClosed(caps);
- }
-
bool isEmpty() const { return fOpChains.empty(); }
/**
@@ -119,6 +109,20 @@
// however, requires that the RTC be able to coordinate with the op list to achieve similar ends
friend class GrRenderTargetContext;
+ bool isNoOp() const {
+ // TODO: GrLoadOp::kDiscard -> [empty opList] -> GrStoreOp::kStore should also be a no-op.
+ // We don't count it as a no-op right now because of Vulkan. There are real cases where we
+ // store a discard, and if we skip that render pass, then the next time we load the render
+ // target, Vulkan detects loading of uninitialized memory and complains. If we don't skip
+ // storing the discard, then we trick Vulkan and it doesn't notice us doing anything wrong.
+ // We should definitely address this issue properly.
+ //
+ // TODO: We should also consider stencil load/store here. We get away with it for now
+ // because we never discard stencil buffers.
+ return fOpChains.empty() && GrLoadOp::kClear != fColorLoadOp &&
+ GrLoadOp::kDiscard != fColorLoadOp;
+ }
+
bool onIsUsed(GrSurfaceProxy*) const override;
// Must only be called if native stencil buffer clearing is enabled
@@ -223,6 +227,15 @@
void forwardCombine(const GrCaps&);
+ ExpectedOutcome onMakeClosed(const GrCaps& caps) override {
+ this->forwardCombine(caps);
+ return (this->isNoOp()) ? ExpectedOutcome::kTargetUnchanged : ExpectedOutcome::kTargetDirty;
+ }
+
+ GrLoadOp fColorLoadOp = GrLoadOp::kLoad;
+ SkPMColor4f fLoadClearColor = SK_PMColor4fTRANSPARENT;
+ GrLoadOp fStencilLoadOp = GrLoadOp::kLoad;
+
uint32_t fLastClipStackGenID;
SkIRect fLastDevClipBounds;
int fLastClipNumAnalyticFPs;
@@ -231,7 +244,7 @@
bool fHasWaitOp = false;;
// For ops/opList we have mean: 5 stdDev: 28
- SkSTArray<25, OpChain, true> fOpChains;
+ SkSTArray<25, OpChain, true> fOpChains;
// MDB TODO: 4096 for the first allocation of the clip space will be huge overkill.
// Gather statistics to determine the correct size.
diff --git a/src/gpu/GrRenderTask.cpp b/src/gpu/GrRenderTask.cpp
index 9909e6c..0d54208 100644
--- a/src/gpu/GrRenderTask.cpp
+++ b/src/gpu/GrRenderTask.cpp
@@ -45,6 +45,22 @@
}
#endif
+void GrRenderTask::makeClosed(const GrCaps& caps) {
+ if (this->isClosed()) {
+ return;
+ }
+
+ if (ExpectedOutcome::kTargetDirty == this->onMakeClosed(caps)) {
+ GrTextureProxy* textureProxy = fTarget->asTextureProxy();
+ if (textureProxy && GrMipMapped::kYes == textureProxy->mipMapped()) {
+ textureProxy->markMipMapsDirty();
+ }
+ }
+
+ this->setFlag(kClosed_Flag);
+}
+
+
void GrRenderTask::prepare(GrOpFlushState* flushState) {
for (int i = 0; i < fDeferredProxies.count(); ++i) {
fDeferredProxies[i]->texPriv().scheduleUpload(flushState);
@@ -71,9 +87,27 @@
void GrRenderTask::addDependency(GrSurfaceProxy* dependedOn, GrMipMapped mipMapped,
GrTextureResolveManager textureResolveManager,
const GrCaps& caps) {
- GrRenderTask* dependedOnTask = dependedOn->getLastRenderTask();
- GrTextureProxy* textureProxy = dependedOn->asTextureProxy();
+ // If it is still receiving dependencies, this GrRenderTask shouldn't be closed
+ SkASSERT(!this->isClosed());
+ GrRenderTask* dependedOnTask = dependedOn->getLastRenderTask();
+
+ if (dependedOnTask == this) {
+ // self-read - presumably for dst reads. We can't make it closed in the self-read case.
+ SkASSERT(GrMipMapped::kNo == mipMapped);
+ SkASSERT(!dependedOn->asTextureProxy() ||
+ !dependedOn->asTextureProxy()->texPriv().isDeferred());
+ return;
+ }
+
+ if (dependedOnTask) {
+ // We are closing 'dependedOnTask' here because the current contents of it are what
+ // 'this' renderTask depends on. We need a break in 'dependedOnTask' so that the usage
+ // of that state has a chance to execute.
+ dependedOnTask->makeClosed(caps);
+ }
+
+ GrTextureProxy* textureProxy = dependedOn->asTextureProxy();
if (GrMipMapped::kYes == mipMapped) {
SkASSERT(textureProxy);
if (GrMipMapped::kYes != textureProxy->mipMapped()) {
@@ -85,9 +119,6 @@
// Does this proxy have mipmaps that need to be regenerated?
if (GrMipMapped::kYes == mipMapped && textureProxy->mipMapsAreDirty()) {
- // We only read our own target during dst reads, and we shouldn't use mipmaps in that case.
- SkASSERT(dependedOnTask != this);
-
// Create an opList that resolves the texture's mipmap data.
GrRenderTask* textureResolveTask = textureResolveManager.newTextureResolveRenderTask(
sk_ref_sp(textureProxy), GrTextureResolveFlags::kMipMaps, caps);
@@ -98,10 +129,11 @@
SkASSERT(!textureProxy->texPriv().isDeferred() ||
textureResolveTask->fDeferredProxies.back() == textureProxy);
- // The GrTextureResolveRenderTask factory should have also marked the mipmaps clean and set
- // the last opList on the textureProxy to textureResolveTask.
+ // The GrTextureResolveRenderTask factory should have also marked the mipmaps clean, set the
+ // last opList on the textureProxy to textureResolveTask, and closed textureResolveTask.
SkASSERT(!textureProxy->mipMapsAreDirty());
SkASSERT(textureProxy->getLastRenderTask() == textureResolveTask);
+ SkASSERT(textureResolveTask->isClosed());
// Fall through and add textureResolveTask as a dependency of "this".
dependedOnTask = textureResolveTask;
@@ -110,19 +142,7 @@
}
if (dependedOnTask) {
- // If it is still receiving dependencies, this GrRenderTask shouldn't be closed
- SkASSERT(!this->isClosed());
-
- if (dependedOnTask == this) {
- // self-read - presumably for dst reads. We can't make it closed in the self-read case.
- } else {
- this->addDependency(dependedOnTask);
-
- // We are closing 'dependedOnTask' here bc the current contents of it are what 'this'
- // dependedOnTask depends on. We need a break in 'dependedOnTask' so that the usage of
- // that state has a chance to execute.
- dependedOnTask->makeClosed(caps);
- }
+ this->addDependency(dependedOnTask);
}
}
diff --git a/src/gpu/GrRenderTask.h b/src/gpu/GrRenderTask.h
index 6b4edc8..044817a 100644
--- a/src/gpu/GrRenderTask.h
+++ b/src/gpu/GrRenderTask.h
@@ -28,16 +28,12 @@
GrRenderTask(sk_sp<GrSurfaceProxy> target);
~GrRenderTask() override;
+ void makeClosed(const GrCaps&);
+
// These two methods are only invoked at flush time
void prepare(GrOpFlushState* flushState);
bool execute(GrOpFlushState* flushState) { return this->onExecute(flushState); }
- virtual void makeClosed(const GrCaps&) {
- if (!this->isClosed()) {
- this->setFlag(kClosed_Flag);
- }
- }
-
// Called when this class will survive a flush and needs to truncate its ops and start over.
// TODO: ultimately it should be invalid for an op list to survive a flush.
// https://bugs.chromium.org/p/skia/issues/detail?id=7111
@@ -82,6 +78,13 @@
SkDEBUGCODE(bool deferredProxiesAreInstantiated() const;)
+ enum class ExpectedOutcome : bool {
+ kTargetUnchanged,
+ kTargetDirty,
+ };
+
+ virtual ExpectedOutcome onMakeClosed(const GrCaps&) = 0;
+
sk_sp<GrSurfaceProxy> fTarget;
// List of texture proxies whose contents are being prepared on a worker thread
diff --git a/src/gpu/GrTextureOpList.cpp b/src/gpu/GrTextureOpList.cpp
index 5b346d4..5a9e02c 100644
--- a/src/gpu/GrTextureOpList.cpp
+++ b/src/gpu/GrTextureOpList.cpp
@@ -25,12 +25,9 @@
GrTextureOpList::GrTextureOpList(sk_sp<GrOpMemoryPool> opMemoryPool,
sk_sp<GrTextureProxy> proxy,
GrAuditTrail* auditTrail)
- : INHERITED(std::move(opMemoryPool), proxy, auditTrail) {
+ : INHERITED(std::move(opMemoryPool), std::move(proxy), auditTrail) {
SkASSERT(fOpMemoryPool);
- SkASSERT(!proxy->readOnly());
- if (GrMipMapped::kYes == proxy->mipMapped()) {
- proxy->markMipMapsDirty();
- }
+ SkASSERT(!fTarget->readOnly());
fTarget->setLastRenderTask(this);
}
@@ -101,16 +98,7 @@
}
bool GrTextureOpList::onExecute(GrOpFlushState* flushState) {
- if (0 == fRecordedOps.count()) {
- // TEMPORARY: We are in the process of moving GrMipMapsStatus from the texture to the proxy.
- // During this time we want to assert that the proxy resolves mipmaps at the exact same
- // times the old code would have. A null opList is very exceptional, and the proxy will have
- // assumed mipmaps are dirty in this scenario. We mark them dirty here on the texture as
- // well, in order to keep the assert passing.
- GrTexture* tex = fTarget->peekTexture();
- if (tex && GrMipMapped::kYes == tex->texturePriv().mipMapped()) {
- tex->texturePriv().markMipMapsDirty();
- }
+ if (fRecordedOps.empty()) {
return false;
}
diff --git a/src/gpu/GrTextureOpList.h b/src/gpu/GrTextureOpList.h
index 756a390..7029b75 100644
--- a/src/gpu/GrTextureOpList.h
+++ b/src/gpu/GrTextureOpList.h
@@ -59,6 +59,11 @@
void recordOp(std::unique_ptr<GrOp>);
+ ExpectedOutcome onMakeClosed(const GrCaps&) override {
+ return (fRecordedOps.empty()) ?
+ ExpectedOutcome::kTargetUnchanged : ExpectedOutcome::kTargetDirty;
+ }
+
// The memory for the ops in 'fOpChains' is actually stored in 'fOpMemoryPool'
SkSTArray<2, std::unique_ptr<GrOp>, true> fRecordedOps;
diff --git a/src/gpu/GrTextureResolveRenderTask.h b/src/gpu/GrTextureResolveRenderTask.h
index eadcdca..00d835c 100644
--- a/src/gpu/GrTextureResolveRenderTask.h
+++ b/src/gpu/GrTextureResolveRenderTask.h
@@ -29,6 +29,11 @@
}
void handleInternalAllocationFailure() override {}
void gatherProxyIntervals(GrResourceAllocator*) const override;
+
+ ExpectedOutcome onMakeClosed(const GrCaps&) override {
+ return ExpectedOutcome::kTargetUnchanged;
+ }
+
bool onExecute(GrOpFlushState*) override;
const GrTextureResolveFlags fResolveFlags;
diff --git a/src/gpu/GrTransferFromRenderTask.h b/src/gpu/GrTransferFromRenderTask.h
index 2ad02e8..d30ecc4 100644
--- a/src/gpu/GrTransferFromRenderTask.h
+++ b/src/gpu/GrTransferFromRenderTask.h
@@ -35,6 +35,11 @@
// If fSrcProxy is uninstantiated at flush time we simply will skip doing the transfer.
void handleInternalAllocationFailure() override {}
void gatherProxyIntervals(GrResourceAllocator*) const override;
+
+ ExpectedOutcome onMakeClosed(const GrCaps&) override {
+ return ExpectedOutcome::kTargetUnchanged;
+ }
+
bool onExecute(GrOpFlushState*) override;
sk_sp<GrSurfaceProxy> fSrcProxy;