Swap GrSurfaceProxy over to being held via sk_sp
This CL:
- replaces GrProxyRef with sk_sp
- streamlines GrIORefProxy to be more like SkRefCntBase (i.e., moves the fTarget pointer down to GrSurfaceProxy); see the sketch below
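
For orientation, here is a minimal, self-contained sketch (not Skia's actual code) of the ownership shape this CL moves to: the proxy base class carries only an intrusive ref count that starts at one, and holders keep the proxy through a smart pointer instead of a GrProxyRef-style wrapper. RefCountedProxy, ProxySp, and FakeSurfaceProxy below are hypothetical stand-ins for GrIORefProxy, sk_sp, and GrSurfaceProxy.

    #include <cassert>
    #include <cstdint>
    #include <utility>

    class RefCountedProxy {            // shaped like the slimmed-down GrIORefProxy
    public:
        RefCountedProxy() : fRefCnt(1) {}
        virtual ~RefCountedProxy() = default;

        bool unique() const { assert(fRefCnt > 0); return 1 == fRefCnt; }
        void ref() const   { assert(fRefCnt > 0); ++fRefCnt; }
        void unref() const {
            assert(fRefCnt > 0);
            if (0 == --fRefCnt) { delete this; }
        }

    private:
        mutable int32_t fRefCnt;       // the only state the base now carries
    };

    template <typename T> class ProxySp {   // sk_sp-like owner replacing GrProxyRef
    public:
        ProxySp() = default;
        explicit ProxySp(T* obj) : fObj(obj) {}   // adopts the caller's ref
        ProxySp(const ProxySp& that) : fObj(that.fObj) { if (fObj) { fObj->ref(); } }
        ProxySp(ProxySp&& that) : fObj(that.fObj) { that.fObj = nullptr; }
        ~ProxySp() { if (fObj) { fObj->unref(); } }

        ProxySp& operator=(ProxySp that) { std::swap(fObj, that.fObj); return *this; }

        T* get() const { return fObj; }
        T* operator->() const { return fObj; }
        explicit operator bool() const { return fObj != nullptr; }
        void reset() { if (fObj) { fObj->unref(); fObj = nullptr; } }

    private:
        T* fObj = nullptr;
    };

    struct FakeSurfaceProxy : RefCountedProxy {};   // stand-in for GrSurfaceProxy

    int main() {
        // Before: holders wrapped the proxy in a GrProxyRef-style member.
        // After: members are declared directly as a smart pointer to the proxy.
        ProxySp<FakeSurfaceProxy> target(new FakeSurfaceProxy);  // refCnt == 1
        ProxySp<FakeSurfaceProxy> alias = target;                // refCnt == 2
        assert(!target->unique());
        alias.reset();                                           // back to 1
        assert(target->unique());
        return 0;
    }

In the actual change this amounts to swapping member declarations such as GrTextureProxyRef fProxyRef and GrSurfaceProxyRef fTarget for sk_sp<GrTextureProxy> and sk_sp<GrSurfaceProxy>, as the diff below shows.
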
Change-Id: I17d515100bb2d9104eed64269bd3bf75c1ebbbb8
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/221997
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
diff --git a/src/gpu/GrFragmentProcessor.cpp b/src/gpu/GrFragmentProcessor.cpp
index 520bf43..0570763 100644
--- a/src/gpu/GrFragmentProcessor.cpp
+++ b/src/gpu/GrFragmentProcessor.cpp
@@ -428,7 +428,7 @@
void GrFragmentProcessor::TextureSampler::reset(sk_sp<GrTextureProxy> proxy,
const GrSamplerState& samplerState) {
- fProxyRef.setProxy(std::move(proxy));
+ fProxy = std::move(proxy);
fSamplerState = samplerState;
fSamplerState.setFilterMode(SkTMin(samplerState.filter(), this->proxy()->highestFilterMode()));
}
@@ -436,7 +436,7 @@
void GrFragmentProcessor::TextureSampler::reset(sk_sp<GrTextureProxy> proxy,
GrSamplerState::Filter filterMode,
GrSamplerState::WrapMode wrapXAndY) {
- fProxyRef.setProxy(std::move(proxy));
+ fProxy = std::move(proxy);
filterMode = SkTMin(filterMode, this->proxy()->highestFilterMode());
fSamplerState = GrSamplerState(wrapXAndY, filterMode);
}
diff --git a/src/gpu/GrFragmentProcessor.h b/src/gpu/GrFragmentProcessor.h
index 92ab9c0..44004c0 100644
--- a/src/gpu/GrFragmentProcessor.h
+++ b/src/gpu/GrFragmentProcessor.h
@@ -9,7 +9,6 @@
#define GrFragmentProcessor_DEFINED
#include "src/gpu/GrProcessor.h"
-#include "src/gpu/GrProxyRef.h"
#include "src/gpu/ops/GrOp.h"
class GrCoordTransform;
@@ -412,7 +411,7 @@
* in pending execution state.
*/
explicit TextureSampler(const TextureSampler& that)
- : fProxyRef(sk_ref_sp(that.fProxyRef.get()))
+ : fProxy(that.fProxy)
, fSamplerState(that.fSamplerState) {}
TextureSampler(sk_sp<GrTextureProxy>, const GrSamplerState&);
@@ -438,27 +437,23 @@
// 'instantiate' should only ever be called at flush time.
// TODO: this can go away once explicit allocation has stuck
bool instantiate(GrResourceProvider* resourceProvider) const {
- return fProxyRef.get()->isInstantiated();
+ return fProxy->isInstantiated();
}
// 'peekTexture' should only ever be called after a successful 'instantiate' call
GrTexture* peekTexture() const {
- SkASSERT(fProxyRef.get()->peekTexture());
- return fProxyRef.get()->peekTexture();
+ SkASSERT(fProxy->isInstantiated());
+ return fProxy->peekTexture();
}
- GrTextureProxy* proxy() const { return fProxyRef.get(); }
+ GrTextureProxy* proxy() const { return fProxy.get(); }
const GrSamplerState& samplerState() const { return fSamplerState; }
- bool isInitialized() const { return SkToBool(fProxyRef.get()); }
- /**
- * For internal use by GrFragmentProcessor.
- */
- const GrTextureProxyRef* proxyRef() const { return &fProxyRef; }
+ bool isInitialized() const { return SkToBool(fProxy.get()); }
private:
- GrTextureProxyRef fProxyRef;
- GrSamplerState fSamplerState;
+ sk_sp<GrTextureProxy> fProxy;
+ GrSamplerState fSamplerState;
};
//////////////////////////////////////////////////////////////////////////////
diff --git a/src/gpu/GrOpList.cpp b/src/gpu/GrOpList.cpp
index 9d85aa8..8b7bdb1 100644
--- a/src/gpu/GrOpList.cpp
+++ b/src/gpu/GrOpList.cpp
@@ -32,26 +32,26 @@
, fUniqueID(CreateUniqueID())
, fFlags(0) {
SkASSERT(fOpMemoryPool);
- fTarget.setProxy(std::move(surfaceProxy));
- fTarget.get()->setLastOpList(this);
+ fTarget = std::move(surfaceProxy);
+ fTarget->setLastOpList(this);
}
GrOpList::~GrOpList() {
- if (fTarget.get() && this == fTarget.get()->getLastOpList()) {
+ if (fTarget && this == fTarget->getLastOpList()) {
// Ensure the target proxy doesn't keep hold of a dangling back pointer.
- fTarget.get()->setLastOpList(nullptr);
+ fTarget->setLastOpList(nullptr);
}
}
// TODO: this can go away when explicit allocation has stuck
bool GrOpList::instantiate(GrResourceProvider* resourceProvider) {
- SkASSERT(fTarget.get()->isInstantiated());
+ SkASSERT(fTarget->isInstantiated());
return true;
}
void GrOpList::endFlush() {
- if (fTarget.get() && this == fTarget.get()->getLastOpList()) {
- fTarget.get()->setLastOpList(nullptr);
+ if (fTarget && this == fTarget->getLastOpList()) {
+ fTarget->setLastOpList(nullptr);
}
fTarget.reset();
@@ -148,7 +148,7 @@
}
#endif
-bool GrOpList::isInstantiated() const { return fTarget.get()->isInstantiated(); }
+bool GrOpList::isInstantiated() const { return fTarget->isInstantiated(); }
void GrOpList::closeThoseWhoDependOnMe(const GrCaps& caps) {
for (int i = 0; i < fDependents.count(); ++i) {
@@ -163,20 +163,19 @@
return false;
}
- GrSurfaceProxy* proxy = fTarget.get();
- bool needsStencil = proxy->asRenderTargetProxy()
- ? proxy->asRenderTargetProxy()->needsStencil()
+ bool needsStencil = fTarget->asRenderTargetProxy()
+ ? fTarget->asRenderTargetProxy()->needsStencil()
: false;
if (needsStencil) {
- GrRenderTarget* rt = proxy->peekRenderTarget();
+ GrRenderTarget* rt = fTarget->peekRenderTarget();
if (!rt->renderTargetPriv().getStencilAttachment()) {
return false;
}
}
- GrSurface* surface = proxy->peekSurface();
+ GrSurface* surface = fTarget->peekSurface();
if (surface->wasDestroyed()) {
return false;
}
@@ -192,9 +191,9 @@
void GrOpList::dump(bool printDependencies) const {
SkDebugf("--------------------------------------------------------------\n");
SkDebugf("opListID: %d - proxyID: %d - surfaceID: %d\n", fUniqueID,
- fTarget.get() ? fTarget.get()->uniqueID().asUInt() : -1,
- fTarget.get() && fTarget.get()->peekSurface()
- ? fTarget.get()->peekSurface()->uniqueID().asUInt()
+ fTarget ? fTarget->uniqueID().asUInt() : -1,
+ fTarget && fTarget->peekSurface()
+ ? fTarget->peekSurface()->uniqueID().asUInt()
: -1);
SkDebugf("ColorLoadOp: %s %x StencilLoadOp: %s\n",
op_to_name(fColorLoadOp),
diff --git a/src/gpu/GrOpList.h b/src/gpu/GrOpList.h
index 066582b..987ddb0 100644
--- a/src/gpu/GrOpList.h
+++ b/src/gpu/GrOpList.h
@@ -11,7 +11,6 @@
#include "include/core/SkRefCnt.h"
#include "include/private/SkColorData.h"
#include "include/private/SkTDArray.h"
-#include "src/gpu/GrProxyRef.h"
#include "src/gpu/GrTextureProxy.h"
class GrAuditTrail;
@@ -99,7 +98,7 @@
// In the DDL case, these back pointers keep the DDL's GrOpMemoryPool alive as long as its
// constituent opLists survive.
sk_sp<GrOpMemoryPool> fOpMemoryPool;
- GrSurfaceProxyRef fTarget;
+ sk_sp<GrSurfaceProxy> fTarget;
GrAuditTrail* fAuditTrail;
GrLoadOp fColorLoadOp = GrLoadOp::kLoad;
diff --git a/src/gpu/GrPrimitiveProcessor.h b/src/gpu/GrPrimitiveProcessor.h
index 196a2c0..4fa67f6 100644
--- a/src/gpu/GrPrimitiveProcessor.h
+++ b/src/gpu/GrPrimitiveProcessor.h
@@ -11,7 +11,6 @@
#include "src/gpu/GrColor.h"
#include "src/gpu/GrNonAtomicRef.h"
#include "src/gpu/GrProcessor.h"
-#include "src/gpu/GrProxyRef.h"
#include "src/gpu/GrShaderVar.h"
class GrCoordTransform;
diff --git a/src/gpu/GrProxyRef.h b/src/gpu/GrProxyRef.h
deleted file mode 100644
index 1048a49..0000000
--- a/src/gpu/GrProxyRef.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright 2018 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrProxyRef_DEFINED
-#define GrProxyRef_DEFINED
-
-#include "include/private/GrTypesPriv.h"
-#include "src/gpu/GrSurfaceProxy.h"
-#include "src/gpu/GrTextureProxy.h"
-
-/**
- * Helper for owning a ref on a GrSurfaceProxy.
- */
-template <typename T> class GrProxyRef {
-public:
- GrProxyRef() = default;
- GrProxyRef(const GrProxyRef&) = delete;
- GrProxyRef& operator=(const GrProxyRef&) = delete;
-
- GrProxyRef(sk_sp<T> proxy) { this->setProxy(std::move(proxy)); }
-
- ~GrProxyRef() { this->reset(); }
-
- void setProxy(sk_sp<T> proxy) {
- fProxy = std::move(proxy);
- }
-
- T* get() const { return fProxy.get(); }
-
- void reset() {
- fProxy = nullptr;
- }
-
-private:
- sk_sp<T> fProxy;
-};
-
-using GrSurfaceProxyRef = GrProxyRef<GrSurfaceProxy>;
-using GrTextureProxyRef = GrProxyRef<GrTextureProxy>;
-
-#endif
diff --git a/src/gpu/GrRenderTargetContext.h b/src/gpu/GrRenderTargetContext.h
index 6dd958f..11ba4cc 100644
--- a/src/gpu/GrRenderTargetContext.h
+++ b/src/gpu/GrRenderTargetContext.h
@@ -491,7 +491,9 @@
GrTextTarget* textTarget() { return fTextTarget.get(); }
- bool isWrapped_ForTesting() const;
+#if GR_TEST_UTILS
+ bool testingOnly_IsInstantiated() const { return fRenderTargetProxy->isInstantiated(); }
+#endif
protected:
GrRenderTargetContext(GrRecordingContext*, sk_sp<GrRenderTargetProxy>,
diff --git a/src/gpu/GrRenderTargetOpList.cpp b/src/gpu/GrRenderTargetOpList.cpp
index 3fd6d85..f07d77d 100644
--- a/src/gpu/GrRenderTargetOpList.cpp
+++ b/src/gpu/GrRenderTargetOpList.cpp
@@ -402,7 +402,7 @@
#endif
void GrRenderTargetOpList::onPrepare(GrOpFlushState* flushState) {
- SkASSERT(fTarget.get()->peekRenderTarget());
+ SkASSERT(fTarget->peekRenderTarget());
SkASSERT(this->isClosed());
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
TRACE_EVENT0("skia", TRACE_FUNC);
@@ -416,7 +416,7 @@
#endif
GrOpFlushState::OpArgs opArgs = {
chain.head(),
- fTarget.get()->asRenderTargetProxy(),
+ fTarget->asRenderTargetProxy(),
chain.appliedClip(),
chain.dstProxy()
};
@@ -468,7 +468,7 @@
return false;
}
- SkASSERT(fTarget.get()->peekRenderTarget());
+ SkASSERT(fTarget->peekRenderTarget());
TRACE_EVENT0("skia", TRACE_FUNC);
// TODO: at the very least, we want the stencil store op to always be discard (at this
@@ -481,9 +481,9 @@
!flushState->gpu()->caps()->performStencilClearsAsDraws());
GrGpuRTCommandBuffer* commandBuffer = create_command_buffer(
flushState->gpu(),
- fTarget.get()->peekRenderTarget(),
- fTarget.get()->origin(),
- fTarget.get()->getBoundsRect(),
+ fTarget->peekRenderTarget(),
+ fTarget->origin(),
+ fTarget->getBoundsRect(),
fColorLoadOp,
fLoadClearColor,
fStencilLoadOp);
@@ -501,7 +501,7 @@
GrOpFlushState::OpArgs opArgs {
chain.head(),
- fTarget.get()->asRenderTargetProxy(),
+ fTarget->asRenderTargetProxy(),
chain.appliedClip(),
chain.dstProxy()
};
@@ -558,14 +558,14 @@
// modifying the stencil buffer we will need a more elaborate tracking system (skbug.com/7002).
// Additionally, if we previously recorded a wait op, we cannot delete the wait op. Until we
// track the wait ops separately from normal ops, we have to avoid clearing out any ops.
- if (this->isEmpty() || (!fTarget.get()->asRenderTargetProxy()->needsStencil() && !fHasWaitOp)) {
+ if (this->isEmpty() || (!fTarget->asRenderTargetProxy()->needsStencil() && !fHasWaitOp)) {
this->deleteOps();
fDeferredProxies.reset();
// If the opList is using a render target which wraps a vulkan command buffer, we can't do a
// clear load since we cannot change the render pass that we are using. Thus we fall back to
// making a clear op in this case.
- return !fTarget.get()->asRenderTargetProxy()->wrapsVkSecondaryCB();
+ return !fTarget->asRenderTargetProxy()->wrapsVkSecondaryCB();
}
// Could not empty the list, so an op must be added to handle the clear
@@ -669,7 +669,7 @@
const DstProxy* dstProxy, const GrCaps& caps) {
SkDEBUGCODE(op->validate();)
SkASSERT(processorAnalysis.requiresDstTexture() == (dstProxy && dstProxy->proxy()));
- SkASSERT(fTarget.get());
+ SkASSERT(fTarget);
// A closed GrOpList should never receive new/more ops
SkASSERT(!this->isClosed());
@@ -682,7 +682,7 @@
// 1) check every op
// 2) intersect with something
// 3) find a 'blocker'
- GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), fTarget.get()->uniqueID());
+ GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), fTarget->uniqueID());
GrOP_INFO("opList: %d Recording (%s, opID: %u)\n"
"\tBounds [L: %.2f, T: %.2f R: %.2f B: %.2f]\n",
this->uniqueID(),
diff --git a/src/gpu/GrRenderTargetProxy.cpp b/src/gpu/GrRenderTargetProxy.cpp
index 50e0c5c..c385d26 100644
--- a/src/gpu/GrRenderTargetProxy.cpp
+++ b/src/gpu/GrRenderTargetProxy.cpp
@@ -79,8 +79,9 @@
GrMipMapped::kNo, nullptr)) {
return false;
}
- SkASSERT(fTarget->asRenderTarget());
- SkASSERT(!fTarget->asTexture());
+
+ SkASSERT(this->peekRenderTarget());
+ SkASSERT(!this->peekTexture());
return true;
}
@@ -110,11 +111,12 @@
}
bool GrRenderTargetProxy::refsWrappedObjects() const {
- if (!fTarget) {
+ if (!this->isInstantiated()) {
return false;
}
- return fTarget->resourcePriv().refsWrappedObjects();
+ GrSurface* surface = this->peekSurface();
+ return surface->resourcePriv().refsWrappedObjects();
}
#ifdef SK_DEBUG
diff --git a/src/gpu/GrResourceAllocator.cpp b/src/gpu/GrResourceAllocator.cpp
index 82c8b82..67c80c6 100644
--- a/src/gpu/GrResourceAllocator.cpp
+++ b/src/gpu/GrResourceAllocator.cpp
@@ -498,15 +498,13 @@
unsigned int min = std::numeric_limits<unsigned int>::max();
unsigned int max = 0;
for(const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
- SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d R:%d W:%d\n",
+ SkDebugf("{ %3d,%3d }: [%2d, %2d] - proxyRefs:%d surfaceRefs:%d\n",
cur->proxy()->uniqueID().asUInt(),
cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
cur->start(),
cur->end(),
cur->proxy()->priv().getProxyRefCnt(),
- cur->proxy()->getBackingRefCnt_TestOnly(),
- cur->proxy()->getPendingReadCnt_TestOnly(),
- cur->proxy()->getPendingWriteCnt_TestOnly());
+ cur->proxy()->testingOnly_getBackingRefCnt());
min = SkTMin(min, cur->start());
max = SkTMax(max, cur->end());
}
diff --git a/src/gpu/GrSurfaceProxy.cpp b/src/gpu/GrSurfaceProxy.cpp
index 466f799..407a005 100644
--- a/src/gpu/GrSurfaceProxy.cpp
+++ b/src/gpu/GrSurfaceProxy.cpp
@@ -51,16 +51,6 @@
}
#endif
-#if GR_TEST_UTILS
-int32_t GrIORefProxy::getBackingRefCnt_TestOnly() const {
- if (fTarget) {
- return fTarget->fRefCnt;
- }
-
- return -1; // no backing GrSurface
-}
-#endif
-
// Lazy-callback version
GrSurfaceProxy::GrSurfaceProxy(LazyInstantiateCallback&& callback, LazyInstantiationType lazyType,
const GrBackendFormat& format, const GrSurfaceDesc& desc,
@@ -98,7 +88,7 @@
// Wrapped version
GrSurfaceProxy::GrSurfaceProxy(sk_sp<GrSurface> surface, GrSurfaceOrigin origin,
const GrSwizzle& textureSwizzle, SkBackingFit fit)
- : INHERITED(std::move(surface))
+ : fTarget(std::move(surface))
, fSurfaceFlags(fTarget->surfacePriv().flags())
, fFormat(fTarget->backendFormat())
, fConfig(fTarget->config())
@@ -406,6 +396,20 @@
budgeted);
}
+#if GR_TEST_UTILS
+int32_t GrSurfaceProxy::testingOnly_getBackingRefCnt() const {
+ if (fTarget) {
+ return fTarget->testingOnly_getRefCnt();
+ }
+
+ return -1; // no backing GrSurface
+}
+
+GrInternalSurfaceFlags GrSurfaceProxy::testingOnly_getFlags() const {
+ return fSurfaceFlags;
+}
+#endif
+
void GrSurfaceProxyPriv::exactify() {
SkASSERT(GrSurfaceProxy::LazyState::kFully != fProxy->lazyInstantiationState());
if (this->isExact()) {
diff --git a/src/gpu/GrSurfaceProxy.h b/src/gpu/GrSurfaceProxy.h
index cc19d93..0b289e3 100644
--- a/src/gpu/GrSurfaceProxy.h
+++ b/src/gpu/GrSurfaceProxy.h
@@ -28,45 +28,34 @@
class GrTextureOpList;
class GrTextureProxy;
-// This class replicates the functionality GrIORef<GrSurface> but tracks the
-// utilitization for later resource allocation (for the deferred case) and
-// forwards on the utilization in the wrapped case
+// This is basically SkRefCntBase except Ganesh uses internalGetProxyRefCnt for more than asserts.
class GrIORefProxy : public SkNoncopyable {
public:
+ GrIORefProxy() : fRefCnt(1) {}
+
+ virtual ~GrIORefProxy() {}
+
+ bool unique() const {
+ SkASSERT(fRefCnt > 0);
+ return 1 == fRefCnt;
+ }
+
void ref() const {
- SkASSERT(fRefCnt >= 1);
+ SkASSERT(fRefCnt > 0);
++fRefCnt;
}
void unref() const {
- SkASSERT(fRefCnt >= 1);
+ SkASSERT(fRefCnt > 0);
--fRefCnt;
if (0 == fRefCnt) {
delete this;
}
}
- bool unique() const {
- SkASSERT(fRefCnt >= 1);
- return 1 == fRefCnt;
- }
-
-#if GR_TEST_UTILS
- int32_t getBackingRefCnt_TestOnly() const;
-#endif
-
protected:
- GrIORefProxy() : fRefCnt(1) {}
- GrIORefProxy(sk_sp<GrSurface> surface) : fTarget(std::move(surface)), fRefCnt(1) {}
-
- virtual ~GrIORefProxy() {}
-
int32_t internalGetProxyRefCnt() const { return fRefCnt; }
- // For deferred proxies this will be null until the proxy is instantiated.
- // For wrapped proxies it will point to the wrapped resource.
- sk_sp<GrSurface> fTarget;
-
private:
mutable int32_t fRefCnt;
};
@@ -320,7 +309,10 @@
static sk_sp<GrTextureProxy> Copy(GrRecordingContext*, GrSurfaceProxy* src, GrMipMapped,
SkBackingFit, SkBudgeted);
- bool isWrapped_ForTesting() const;
+#if GR_TEST_UTILS
+ int32_t testingOnly_getBackingRefCnt() const;
+ GrInternalSurfaceFlags testingOnly_getFlags() const;
+#endif
SkDEBUGCODE(void validate(GrContext_Base*) const;)
@@ -328,15 +320,6 @@
inline GrSurfaceProxyPriv priv();
inline const GrSurfaceProxyPriv priv() const;
- /**
- * Provides privileged access to select callers to be able to add a ref to a GrSurfaceProxy
- * with zero refs.
- */
- class FirstRefAccess;
- inline FirstRefAccess firstRefAccess();
-
- GrInternalSurfaceFlags testingOnly_getFlags() const;
-
protected:
// Deferred version
GrSurfaceProxy(const GrBackendFormat& format, const GrSurfaceDesc& desc,
@@ -357,7 +340,7 @@
GrSurfaceProxy(sk_sp<GrSurface>, GrSurfaceOrigin, const GrSwizzle& textureSwizzle,
SkBackingFit);
- virtual ~GrSurfaceProxy();
+ ~GrSurfaceProxy() override;
friend class GrSurfaceProxyPriv;
@@ -389,6 +372,10 @@
bool instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt, bool needsStencil,
GrSurfaceDescFlags descFlags, GrMipMapped, const GrUniqueKey*);
+ // For deferred proxies this will be null until the proxy is instantiated.
+ // For wrapped proxies it will point to the wrapped resource.
+ sk_sp<GrSurface> fTarget;
+
// In many cases these flags aren't actually known until the proxy has been instantiated.
// However, Ganesh frequently needs to change its behavior based on these settings. For
// internally create proxies we will know these properties ahead of time. For wrapped
diff --git a/src/gpu/GrTextureOpList.cpp b/src/gpu/GrTextureOpList.cpp
index 732e6cb..86f8f60 100644
--- a/src/gpu/GrTextureOpList.cpp
+++ b/src/gpu/GrTextureOpList.cpp
@@ -74,7 +74,7 @@
#endif
void GrTextureOpList::onPrepare(GrOpFlushState* flushState) {
- SkASSERT(fTarget.get()->peekTexture());
+ SkASSERT(fTarget->peekTexture());
SkASSERT(this->isClosed());
// Loop over the ops that haven't yet generated their geometry
@@ -99,11 +99,11 @@
return false;
}
- SkASSERT(fTarget.get()->peekTexture());
+ SkASSERT(fTarget->peekTexture());
GrGpuTextureCommandBuffer* commandBuffer(
- flushState->gpu()->getCommandBuffer(fTarget.get()->peekTexture(),
- fTarget.get()->origin()));
+ flushState->gpu()->getCommandBuffer(fTarget->peekTexture(),
+ fTarget->origin()));
flushState->setCommandBuffer(commandBuffer);
for (int i = 0; i < fRecordedOps.count(); ++i) {
@@ -231,11 +231,11 @@
}
void GrTextureOpList::recordOp(std::unique_ptr<GrOp> op) {
- SkASSERT(fTarget.get());
+ SkASSERT(fTarget);
// A closed GrOpList should never receive new/more ops
SkASSERT(!this->isClosed());
- GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), fTarget.get()->uniqueID());
+ GR_AUDIT_TRAIL_ADD_OP(fAuditTrail, op.get(), fTarget->uniqueID());
GrOP_INFO("Re-Recording (%s, opID: %u)\n"
"\tBounds LRTB (%f, %f, %f, %f)\n",
op->name(),
diff --git a/src/gpu/GrTextureProxy.cpp b/src/gpu/GrTextureProxy.cpp
index 6a9fd26..4a66daf 100644
--- a/src/gpu/GrTextureProxy.cpp
+++ b/src/gpu/GrTextureProxy.cpp
@@ -76,8 +76,8 @@
return false;
}
- SkASSERT(!fTarget->asRenderTarget());
- SkASSERT(fTarget->asTexture());
+ SkASSERT(!this->peekRenderTarget());
+ SkASSERT(this->peekTexture());
return true;
}
@@ -102,7 +102,7 @@
void GrTextureProxyPriv::scheduleUpload(GrOpFlushState* flushState) {
// The texture proxy's contents may already have been uploaded or instantiation may have failed
- if (fTextureProxy->fDeferredUploader && fTextureProxy->fTarget) {
+ if (fTextureProxy->fDeferredUploader && fTextureProxy->isInstantiated()) {
fTextureProxy->fDeferredUploader->scheduleUpload(flushState, fTextureProxy);
}
}
diff --git a/src/gpu/GrTextureProxy.h b/src/gpu/GrTextureProxy.h
index a1e926b..251f6ad 100644
--- a/src/gpu/GrTextureProxy.h
+++ b/src/gpu/GrTextureProxy.h
@@ -57,13 +57,16 @@
*/
const GrUniqueKey& getUniqueKey() const {
#ifdef SK_DEBUG
- if (fTarget && fUniqueKey.isValid() && fSyncTargetKey) {
- SkASSERT(fTarget->getUniqueKey().isValid());
+ if (this->isInstantiated() && fUniqueKey.isValid() && fSyncTargetKey) {
+ GrSurface* surface = this->peekSurface();
+ SkASSERT(surface);
+
+ SkASSERT(surface->getUniqueKey().isValid());
// It is possible for a non-keyed proxy to have a uniquely keyed resource assigned to
// it. This just means that a future user of the resource will be filling it with unique
// data. However, if the proxy has a unique key its attached resource should also
// have that key.
- SkASSERT(fUniqueKey == fTarget->getUniqueKey());
+ SkASSERT(fUniqueKey == surface->getUniqueKey());
}
#endif
diff --git a/src/gpu/GrTextureRenderTargetProxy.cpp b/src/gpu/GrTextureRenderTargetProxy.cpp
index 8366e75..8c6e0fa 100644
--- a/src/gpu/GrTextureRenderTargetProxy.cpp
+++ b/src/gpu/GrTextureRenderTargetProxy.cpp
@@ -99,8 +99,8 @@
SkASSERT(key == this->getUniqueKey());
}
- SkASSERT(fTarget->asRenderTarget());
- SkASSERT(fTarget->asTexture());
+ SkASSERT(this->peekRenderTarget());
+ SkASSERT(this->peekTexture());
return true;
}