Hoist GrVkResource up so it can be used for D3D and Metal.
Bug: skia:9935
Change-Id: Ie13b9077c5db805020973e5cbab1aa8468c88742
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/276214
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Jim Van Verth <jvanverth@google.com>
diff --git a/src/gpu/GrManagedResource.cpp b/src/gpu/GrManagedResource.cpp
new file mode 100644
index 0000000..df76a45
--- /dev/null
+++ b/src/gpu/GrManagedResource.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2020 Google LLC
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "src/gpu/GrManagedResource.h"
+
+#include "src/gpu/GrGpuResourcePriv.h"
+#include "src/gpu/GrTexture.h"
+
+// Registers an idle proc and records the GrTexture that owns this resource. A resource
+// belongs to at most one texture, so repeat calls must pass the same owner.
+void GrTextureResource::addIdleProc(GrTexture* owningTexture,
+                                    sk_sp<GrRefCntedCallback> idleProc) const {
+    SkASSERT(!fOwningTexture || fOwningTexture == owningTexture);
+    fOwningTexture = owningTexture;
+    fIdleProcs.push_back(std::move(idleProc));
+}
+
+int GrTextureResource::idleProcCnt() const { return fIdleProcs.count(); }
+
+sk_sp<GrRefCntedCallback> GrTextureResource::idleProc(int i) const { return fIdleProcs[i]; }
+
+void GrTextureResource::resetIdleProcs() const { fIdleProcs.reset(); }
+
+void GrTextureResource::removeOwningTexture() const { fOwningTexture = nullptr; }
+
+// Bumps the count of outstanding GPU work (e.g. command buffers) referencing this resource.
+void GrTextureResource::notifyQueuedForWorkOnGpu() const { ++fNumOwners; }
+
+// Drops one GPU-work reference. When the count hits zero and idle procs are registered,
+// either hand off to the owning texture (deferring while the client/cache still holds a
+// ref on it) or, with no owning texture, drop our refs to the procs, which may be what
+// ultimately triggers them.
+void GrTextureResource::notifyFinishedWithWorkOnGpu() const {
+    SkASSERT(fNumOwners);
+    if (--fNumOwners || !fIdleProcs.count()) {
+        return;
+    }
+    if (fOwningTexture) {
+        if (fOwningTexture->resourcePriv().hasRef()) {
+            // Wait for the texture to become idle in the cache to call the procs.
+            return;
+        }
+        fOwningTexture->callIdleProcsOnBehalfOfResource();
+    } else {
+        fIdleProcs.reset();
+    }
+}
diff --git a/src/gpu/GrManagedResource.h b/src/gpu/GrManagedResource.h
new file mode 100644
index 0000000..94fd06a
--- /dev/null
+++ b/src/gpu/GrManagedResource.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrManagedResource_DEFINED
+#define GrManagedResource_DEFINED
+
+#include "include/private/GrTypesPriv.h"
+#include "include/private/SkMutex.h"
+#include "include/private/SkTHash.h"
+#include "include/utils/SkRandom.h"
+#include <atomic>
+
+class GrGpu;
+class GrTexture;
+
+// uncomment to enable tracing of resource refs
+#ifdef SK_DEBUG
+#define SK_TRACE_MANAGED_RESOURCES
+#endif
+
+/** \class GrManagedResource
+
+    GrManagedResource is the base class for GPU resources that may be shared by
+    multiple objects, in particular objects that are tracked by a command buffer.
+    When an existing owner wants to share a reference, it calls ref().
+    When an owner wants to release its reference, it calls unref(). When the
+    shared object's reference count goes to zero as the result of an unref()
+    call, its (virtual) destructor is called. It is an error for the
+    destructor to be called explicitly (or via the object going out of scope on
+    the stack or calling delete) if getRefCnt() > 1.
+
+    This is nearly identical to SkRefCntBase. The exceptions are that unref()
+    takes a GrGpu, and any derived classes must implement freeGPUData().
+*/
+
+class GrManagedResource : SkNoncopyable {
+public:
+    // Simple refCount tracing, to ensure that everything ref'ed is unref'ed.
+#ifdef SK_TRACE_MANAGED_RESOURCES
+    struct Hash {
+        uint32_t operator()(const GrManagedResource* const& r) const {
+            SkASSERT(r);
+            return r->fKey;
+        }
+    };
+
+    class Trace {
+    public:
+        ~Trace() {
+            fHashSet.foreach([](const GrManagedResource* r) {
+                r->dumpInfo();
+            });
+            SkASSERT(0 == fHashSet.count());
+        }
+
+        void add(const GrManagedResource* r) {
+            SkAutoMutexExclusive locked(fLock);
+            fHashSet.add(r);
+        }
+
+        void remove(const GrManagedResource* r) {
+            SkAutoMutexExclusive locked(fLock);
+            fHashSet.remove(r);
+        }
+
+    private:
+        SkMutex fLock;
+        SkTHashSet<const GrManagedResource*, GrManagedResource::Hash> fHashSet SK_GUARDED_BY(fLock);
+    };
+
+    static std::atomic<uint32_t> fKeyCounter;
+#endif
+
+    /** Default construct, initializing the reference count to 1.
+     */
+    GrManagedResource() : fRefCnt(1) {
+#ifdef SK_TRACE_MANAGED_RESOURCES
+        fKey = fKeyCounter.fetch_add(+1, std::memory_order_relaxed);
+        GetTrace()->add(this);
+#endif
+    }
+
+    /** Destruct, asserting that the reference count is 1.
+     */
+    virtual ~GrManagedResource() {
+#ifdef SK_DEBUG
+        auto count = this->getRefCnt();
+        SkASSERTF(count == 1, "fRefCnt was %d", count);
+        fRefCnt.store(0);    // illegal value, to catch us if we reuse after delete
+#endif
+    }
+
+#ifdef SK_DEBUG
+    /** Return the reference count. Use only for debugging. */
+    int32_t getRefCnt() const { return fRefCnt.load(); }
+#endif
+
+    /** May return true if the caller is the only owner.
+     *  Ensures that all previous owner's actions are complete.
+     */
+    bool unique() const {
+        // The acquire barrier is only really needed if we return true. It
+        // prevents code conditioned on the result of unique() from running
+        // until previous owners are all totally done calling unref().
+        return 1 == fRefCnt.load(std::memory_order_acquire);
+    }
+
+    /** Increment the reference count.
+        Must be balanced by a call to unref() or unrefAndFreeResources().
+    */
+    void ref() const {
+        // No barrier required. Note: fetch_add returns the pre-increment value.
+        SkDEBUGCODE(int prevRefCount = )fRefCnt.fetch_add(+1, std::memory_order_relaxed);
+        SkASSERT(prevRefCount >= 1);
+    }
+
+    /** Decrement the reference count. If the reference count is 1 before the
+        decrement, then delete the object. Note that if this is the case, then
+        the object needs to have been allocated via new, and not on the stack.
+        Any GPU data associated with this resource will be freed before it's deleted.
+    */
+    void unref(GrGpu* gpu) const {
+        SkASSERT(gpu);
+        // A release here acts in place of all releases we "should" have been doing in ref().
+        int prevRefCount = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
+        SkASSERT(prevRefCount >= 1);  // 0 here would mean unref() of an already-freed resource
+        if (prevRefCount == 1) {
+            // Like unique(), the acquire is only needed on success, to make sure
+            // code in internal_dispose() doesn't happen before the decrement.
+            this->internal_dispose(gpu);
+        }
+    }
+
+    // Called every time this resource is queued for use on the GPU (typically because
+    // it was added to a command buffer).
+    virtual void notifyQueuedForWorkOnGpu() const {}
+    // Called every time this resource has finished its use on the GPU (typically because
+    // the command buffer finished execution on the GPU.)
+    virtual void notifyFinishedWithWorkOnGpu() const {}
+
+#ifdef SK_DEBUG
+    void validate() const {
+        SkASSERT(this->getRefCnt() > 0);
+    }
+#endif
+
+#ifdef SK_TRACE_MANAGED_RESOURCES
+    /** Output a human-readable dump of this resource's information
+     */
+    virtual void dumpInfo() const = 0;
+#endif
+
+private:
+#ifdef SK_TRACE_MANAGED_RESOURCES
+    static Trace* GetTrace() {
+        static Trace kTrace;
+        return &kTrace;
+    }
+#endif
+
+    /** Must be implemented by any subclasses.
+     *  Deletes any GPU data associated with this resource
+     */
+    virtual void freeGPUData(GrGpu* gpu) const = 0;
+
+    /**
+     * Called when the ref count goes to 0. Will free GPU resources.
+     */
+    void internal_dispose(GrGpu* gpu) const {
+        this->freeGPUData(gpu);
+#ifdef SK_TRACE_MANAGED_RESOURCES
+        GetTrace()->remove(this);
+#endif
+
+#ifdef SK_DEBUG
+        SkASSERT(0 == this->getRefCnt());
+        fRefCnt.store(1);
+#endif
+        delete this;
+    }
+
+    mutable std::atomic<int32_t> fRefCnt;
+#ifdef SK_TRACE_MANAGED_RESOURCES
+    uint32_t fKey;
+#endif
+
+    typedef SkNoncopyable INHERITED;
+};
+
+// This subclass allows for recycling
+class GrRecycledResource : public GrManagedResource {
+public:
+    // When recycle is called and there is only one ref left on the resource, we will signal that
+    // the resource can be recycled for reuse. If the subclass (or whoever is managing this resource)
+    // decides not to recycle the objects, it is their responsibility to call unref on the object.
+    void recycle(GrGpu* gpu) const {
+        if (this->unique()) {
+            this->onRecycle(gpu);
+        } else {
+            this->unref(gpu);
+        }
+    }
+
+private:
+    virtual void onRecycle(GrGpu* gpu) const = 0;
+};
+
+/** \class GrTextureResource
+
+    GrTextureResource is the base class for managed texture resources, and implements the
+    basic idleProc and releaseProc functionality for them.
+
+*/
+class GrTextureResource : public GrManagedResource {
+public:
+    GrTextureResource() {}
+
+    ~GrTextureResource() override {
+        SkASSERT(!fReleaseHelper);
+    }
+
+    void setRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
+        fReleaseHelper = std::move(releaseHelper);
+    }
+
+    /**
+     * These are used to coordinate calling the "finished" idle procs between the GrTexture
+     * and the GrTextureResource. If the GrTexture becomes purgeable and if there are no command
+     * buffers referring to the GrTextureResource then it calls the procs. Otherwise, the
+     * GrTextureResource calls them when the last command buffer reference goes away and the
+     * GrTexture is purgeable.
+     */
+    void addIdleProc(GrTexture*, sk_sp<GrRefCntedCallback>) const;
+    int idleProcCnt() const;
+    sk_sp<GrRefCntedCallback> idleProc(int) const;
+    void resetIdleProcs() const;
+    void removeOwningTexture() const;
+
+    /**
+     * We track how many outstanding references this GrTextureResource has in command buffers and
+     * when the count reaches zero we call the idle proc.
+     */
+    void notifyQueuedForWorkOnGpu() const override;
+    void notifyFinishedWithWorkOnGpu() const override;
+    bool isQueuedForWorkOnGpu() const { return fNumOwners > 0; }
+
+protected:
+    mutable sk_sp<GrRefCntedCallback> fReleaseHelper;
+    mutable GrTexture* fOwningTexture = nullptr;
+
+    void invokeReleaseProc() const {
+        if (fReleaseHelper) {
+            // Depending on the ref count of fReleaseHelper this may or may not actually trigger
+            // the ReleaseProc to be called.
+            fReleaseHelper.reset();
+        }
+    }
+
+private:
+    mutable int fNumOwners = 0;
+    mutable SkTArray<sk_sp<GrRefCntedCallback>> fIdleProcs;
+
+    typedef GrManagedResource INHERITED;
+};
+
+#endif
diff --git a/src/gpu/GrTexture.h b/src/gpu/GrTexture.h
index 7045a0e..b22cfde 100644
--- a/src/gpu/GrTexture.h
+++ b/src/gpu/GrTexture.h
@@ -84,6 +84,7 @@
// We're about to be idle in the resource cache. Do our part to trigger the idle callbacks.
fIdleProcs.reset();
}
+ virtual void callIdleProcsOnBehalfOfResource() {}
void computeScratchKey(GrScratchKey*) const override;
private:
@@ -95,6 +96,7 @@
GrMipMapsStatus fMipMapsStatus;
int fMaxMipMapLevel;
friend class GrTexturePriv;
+ friend class GrTextureResource;
typedef GrSurface INHERITED;
};
diff --git a/src/gpu/vk/GrVkBuffer.cpp b/src/gpu/vk/GrVkBuffer.cpp
index 2163f9a..c608976 100644
--- a/src/gpu/vk/GrVkBuffer.cpp
+++ b/src/gpu/vk/GrVkBuffer.cpp
@@ -104,11 +104,12 @@
&bufferMemoryBarrier);
}
-void GrVkBuffer::Resource::freeGPUData(GrVkGpu* gpu) const {
+void GrVkBuffer::Resource::freeGPUData(GrGpu* gpu) const {
SkASSERT(fBuffer);
SkASSERT(fAlloc.fMemory);
- VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
- GrVkMemory::FreeBufferMemory(gpu, fType, fAlloc);
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ VK_CALL(vkGpu, DestroyBuffer(vkGpu->device(), fBuffer, nullptr));
+ GrVkMemory::FreeBufferMemory(vkGpu, fType, fAlloc);
}
void GrVkBuffer::vkRelease(const GrVkGpu* gpu) {
diff --git a/src/gpu/vk/GrVkBuffer.h b/src/gpu/vk/GrVkBuffer.h
index 9181050..89f9708 100644
--- a/src/gpu/vk/GrVkBuffer.h
+++ b/src/gpu/vk/GrVkBuffer.h
@@ -9,7 +9,7 @@
#define GrVkBuffer_DEFINED
#include "include/gpu/vk/GrVkTypes.h"
-#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/GrManagedResource.h"
class GrVkGpu;
@@ -27,7 +27,7 @@
VkBuffer buffer() const { return fResource->fBuffer; }
const GrVkAlloc& alloc() const { return fResource->fAlloc; }
- const GrVkRecycledResource* resource() const { return fResource; }
+ const GrRecycledResource* resource() const { return fResource; }
size_t size() const { return fDesc.fSizeInBytes; }
VkDeviceSize offset() const { return fOffset; }
@@ -54,12 +54,12 @@
bool fDynamic;
};
- class Resource : public GrVkRecycledResource {
+ class Resource : public GrRecycledResource {
public:
Resource(VkBuffer buf, const GrVkAlloc& alloc, Type type)
: INHERITED(), fBuffer(buf), fAlloc(alloc), fType(type) {}
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkBuffer: %d (%d refs)\n", fBuffer, this->getRefCnt());
}
@@ -69,11 +69,11 @@
Type fType;
private:
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
- void onRecycle(GrVkGpu* gpu) const override { this->unref(gpu); }
+ void onRecycle(GrGpu* gpu) const override { this->unref(gpu); }
- typedef GrVkRecycledResource INHERITED;
+ typedef GrRecycledResource INHERITED;
};
// convenience routine for raw buffer creation
diff --git a/src/gpu/vk/GrVkCommandBuffer.cpp b/src/gpu/vk/GrVkCommandBuffer.cpp
index 696a5307..12504fb 100644
--- a/src/gpu/vk/GrVkCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkCommandBuffer.cpp
@@ -40,7 +40,7 @@
}
}
-void GrVkCommandBuffer::freeGPUData(GrVkGpu* gpu, VkCommandPool cmdPool) const {
+void GrVkCommandBuffer::freeGPUData(GrGpu* gpu, VkCommandPool cmdPool) const {
TRACE_EVENT0("skia.gpu", TRACE_FUNC);
SkASSERT(!fIsActive);
SkASSERT(!fTrackedResources.count());
@@ -48,9 +48,10 @@
SkASSERT(cmdPool != VK_NULL_HANDLE);
SkASSERT(!this->isWrapped());
- GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), cmdPool, 1, &fCmdBuffer));
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ GR_VK_CALL(vkGpu->vkInterface(), FreeCommandBuffers(vkGpu->device(), cmdPool, 1, &fCmdBuffer));
- this->onFreeGPUData(gpu);
+ this->onFreeGPUData(vkGpu);
}
void GrVkCommandBuffer::releaseResources(GrVkGpu* gpu) {
@@ -58,11 +59,11 @@
SkDEBUGCODE(fResourcesReleased = true;)
SkASSERT(!fIsActive);
for (int i = 0; i < fTrackedResources.count(); ++i) {
- fTrackedResources[i]->notifyRemovedFromCommandBuffer();
+ fTrackedResources[i]->notifyFinishedWithWorkOnGpu();
fTrackedResources[i]->unref(gpu);
}
for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
- fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
+ fTrackedRecycledResources[i]->notifyFinishedWithWorkOnGpu();
fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
}
@@ -87,7 +88,7 @@
////////////////////////////////////////////////////////////////////////////////
void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
- const GrVkResource* resource,
+ const GrManagedResource* resource,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion,
@@ -643,10 +644,10 @@
}
void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
- const GrVkResource* srcResource,
+ const GrManagedResource* srcResource,
VkImage srcImage,
VkImageLayout srcLayout,
- const GrVkResource* dstResource,
+ const GrManagedResource* dstResource,
VkImage dstImage,
VkImageLayout dstLayout,
uint32_t blitRegionCount,
diff --git a/src/gpu/vk/GrVkCommandBuffer.h b/src/gpu/vk/GrVkCommandBuffer.h
index 9a14ab4..37c985d 100644
--- a/src/gpu/vk/GrVkCommandBuffer.h
+++ b/src/gpu/vk/GrVkCommandBuffer.h
@@ -9,8 +9,8 @@
#define GrVkCommandBuffer_DEFINED
#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrManagedResource.h"
#include "src/gpu/vk/GrVkGpu.h"
-#include "src/gpu/vk/GrVkResource.h"
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkUtil.h"
@@ -40,7 +40,7 @@
};
void pipelineBarrier(const GrVkGpu* gpu,
- const GrVkResource* resource,
+ const GrManagedResource* resource,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion,
@@ -96,24 +96,24 @@
// Add ref-counted resource that will be tracked and released when this command buffer finishes
// execution
- void addResource(const GrVkResource* resource) {
+ void addResource(const GrManagedResource* resource) {
SkASSERT(resource);
resource->ref();
- resource->notifyAddedToCommandBuffer();
+ resource->notifyQueuedForWorkOnGpu();
fTrackedResources.append(1, &resource);
}
// Add ref-counted resource that will be tracked and released when this command buffer finishes
// execution. When it is released, it will signal that the resource can be recycled for reuse.
- void addRecycledResource(const GrVkRecycledResource* resource) {
+ void addRecycledResource(const GrRecycledResource* resource) {
resource->ref();
- resource->notifyAddedToCommandBuffer();
+ resource->notifyQueuedForWorkOnGpu();
fTrackedRecycledResources.append(1, &resource);
}
void releaseResources(GrVkGpu* gpu);
- void freeGPUData(GrVkGpu* gpu, VkCommandPool pool) const;
+ void freeGPUData(GrGpu* gpu, VkCommandPool pool) const;
bool hasWork() const { return fHasWork; }
@@ -132,8 +132,8 @@
void submitPipelineBarriers(const GrVkGpu* gpu);
- SkTDArray<const GrVkResource*> fTrackedResources;
- SkTDArray<const GrVkRecycledResource*> fTrackedRecycledResources;
+ SkTDArray<const GrManagedResource*> fTrackedResources;
+ SkTDArray<const GrRecycledResource*> fTrackedRecycledResources;
// Tracks whether we are in the middle of a command buffer begin/end calls and thus can add
// new commands to the buffer;
@@ -232,10 +232,10 @@
const VkImageCopy* copyRegions);
void blitImage(const GrVkGpu* gpu,
- const GrVkResource* srcResource,
+ const GrManagedResource* srcResource,
VkImage srcImage,
VkImageLayout srcLayout,
- const GrVkResource* dstResource,
+ const GrManagedResource* dstResource,
VkImage dstImage,
VkImageLayout dstLayout,
uint32_t blitRegionCount,
diff --git a/src/gpu/vk/GrVkCommandPool.cpp b/src/gpu/vk/GrVkCommandPool.cpp
index 95b90d9..adc25a6 100644
--- a/src/gpu/vk/GrVkCommandPool.cpp
+++ b/src/gpu/vk/GrVkCommandPool.cpp
@@ -86,19 +86,20 @@
fPrimaryCommandBuffer->recycleSecondaryCommandBuffers(this);
}
-void GrVkCommandPool::freeGPUData(GrVkGpu* gpu) const {
- // TODO: having freeGPUData virtual on GrVkResource be const seems like a bad restriction since
+void GrVkCommandPool::freeGPUData(GrGpu* gpu) const {
+ // TODO: having freeGPUData virtual on GrManagedResource be const seems like a bad restriction since
// we are changing the internal objects of these classes when it is called. We should go back a
// revisit how much of a headache it would be to make this function non-const
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
GrVkCommandPool* nonConstThis = const_cast<GrVkCommandPool*>(this);
nonConstThis->close();
- nonConstThis->releaseResources(gpu);
+ nonConstThis->releaseResources(vkGpu);
fPrimaryCommandBuffer->freeGPUData(gpu, fCommandPool);
for (const auto& buffer : fAvailableSecondaryBuffers) {
buffer->freeGPUData(gpu, fCommandPool);
}
if (fCommandPool != VK_NULL_HANDLE) {
- GR_VK_CALL(gpu->vkInterface(),
- DestroyCommandPool(gpu->device(), fCommandPool, nullptr));
+ GR_VK_CALL(vkGpu->vkInterface(),
+ DestroyCommandPool(vkGpu->device(), fCommandPool, nullptr));
}
}
diff --git a/src/gpu/vk/GrVkCommandPool.h b/src/gpu/vk/GrVkCommandPool.h
index 31a563f..fb300d3 100644
--- a/src/gpu/vk/GrVkCommandPool.h
+++ b/src/gpu/vk/GrVkCommandPool.h
@@ -8,15 +8,15 @@
#ifndef GrVkCommandPool_DEFINED
#define GrVkCommandPool_DEFINED
+#include "src/gpu/GrManagedResource.h"
#include "src/gpu/vk/GrVkInterface.h"
-#include "src/gpu/vk/GrVkResource.h"
#include "src/gpu/vk/GrVkResourceProvider.h"
class GrVkPrimaryCommandBuffer;
class GrVkSecondaryCommandBuffer;
class GrVkGpu;
-class GrVkCommandPool : public GrVkResource {
+class GrVkCommandPool : public GrManagedResource {
public:
static GrVkCommandPool* Create(GrVkGpu* gpu);
@@ -52,7 +52,7 @@
GrVkCommandPool(GrVkGpu* gpu, VkCommandPool commandPool, GrVkPrimaryCommandBuffer*);
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
bool fOpen = true;
diff --git a/src/gpu/vk/GrVkDescriptorPool.cpp b/src/gpu/vk/GrVkDescriptorPool.cpp
index 3d7d9cd..0d7e28a 100644
--- a/src/gpu/vk/GrVkDescriptorPool.cpp
+++ b/src/gpu/vk/GrVkDescriptorPool.cpp
@@ -45,8 +45,9 @@
return fType == type && count <= fCount;
}
-void GrVkDescriptorPool::freeGPUData(GrVkGpu* gpu) const {
+void GrVkDescriptorPool::freeGPUData(GrGpu* gpu) const {
// Destroying the VkDescriptorPool will automatically free and delete any VkDescriptorSets
// allocated from the pool.
- GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorPool(gpu->device(), fDescPool, nullptr));
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ GR_VK_CALL(vkGpu->vkInterface(), DestroyDescriptorPool(vkGpu->device(), fDescPool, nullptr));
}
diff --git a/src/gpu/vk/GrVkDescriptorPool.h b/src/gpu/vk/GrVkDescriptorPool.h
index b1a199e..4976e3f 100644
--- a/src/gpu/vk/GrVkDescriptorPool.h
+++ b/src/gpu/vk/GrVkDescriptorPool.h
@@ -9,7 +9,7 @@
#define GrVkDescriptorPool_DEFINED
#include "include/gpu/vk/GrVkTypes.h"
-#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/GrManagedResource.h"
class GrVkGpu;
@@ -18,7 +18,7 @@
* make one type of descriptor set. Thus a single VkDescriptorPool will only allocated space for
* for one type of descriptor.
*/
-class GrVkDescriptorPool : public GrVkResource {
+class GrVkDescriptorPool : public GrManagedResource {
public:
static GrVkDescriptorPool* Create(GrVkGpu* gpu, VkDescriptorType type, uint32_t count);
@@ -28,7 +28,7 @@
// not in use by another draw, to support the requested type and count.
bool isCompatible(VkDescriptorType type, uint32_t count) const;
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkDescriptorPool: %d, type %d (%d refs)\n", fDescPool, fType,
this->getRefCnt());
@@ -38,13 +38,13 @@
private:
GrVkDescriptorPool(VkDescriptorPool pool, VkDescriptorType type, uint32_t count);
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
VkDescriptorType fType;
uint32_t fCount;
VkDescriptorPool fDescPool;
- typedef GrVkResource INHERITED;
+ typedef GrManagedResource INHERITED;
};
#endif
diff --git a/src/gpu/vk/GrVkDescriptorSet.cpp b/src/gpu/vk/GrVkDescriptorSet.cpp
index 81ffca0..ce029fc 100644
--- a/src/gpu/vk/GrVkDescriptorSet.cpp
+++ b/src/gpu/vk/GrVkDescriptorSet.cpp
@@ -20,11 +20,12 @@
fPool->ref();
}
-void GrVkDescriptorSet::freeGPUData(GrVkGpu* gpu) const {
+void GrVkDescriptorSet::freeGPUData(GrGpu* gpu) const {
fPool->unref(gpu);
}
-void GrVkDescriptorSet::onRecycle(GrVkGpu* gpu) const {
- gpu->resourceProvider().recycleDescriptorSet(this, fHandle);
+void GrVkDescriptorSet::onRecycle(GrGpu* gpu) const {
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ vkGpu->resourceProvider().recycleDescriptorSet(this, fHandle);
}
diff --git a/src/gpu/vk/GrVkDescriptorSet.h b/src/gpu/vk/GrVkDescriptorSet.h
index 99eda88..8647f5c 100644
--- a/src/gpu/vk/GrVkDescriptorSet.h
+++ b/src/gpu/vk/GrVkDescriptorSet.h
@@ -9,13 +9,13 @@
#define GrVkDescriptorSet_DEFINED
#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrManagedResource.h"
#include "src/gpu/vk/GrVkDescriptorSetManager.h"
-#include "src/gpu/vk/GrVkResource.h"
class GrVkDescriptorPool;
class GrVkGpu;
-class GrVkDescriptorSet : public GrVkRecycledResource {
+class GrVkDescriptorSet : public GrRecycledResource {
public:
GrVkDescriptorSet(VkDescriptorSet descSet,
GrVkDescriptorPool* pool,
@@ -25,15 +25,15 @@
const VkDescriptorSet* descriptorSet() const { return &fDescSet; }
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkDescriptorSet: %d (%d refs)\n", fDescSet, this->getRefCnt());
}
#endif
private:
- void freeGPUData(GrVkGpu* gpu) const override;
- void onRecycle(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
+ void onRecycle(GrGpu* gpu) const override;
VkDescriptorSet fDescSet;
SkDEBUGCODE(mutable) GrVkDescriptorPool* fPool;
diff --git a/src/gpu/vk/GrVkFramebuffer.cpp b/src/gpu/vk/GrVkFramebuffer.cpp
index f256999..cf97b24 100644
--- a/src/gpu/vk/GrVkFramebuffer.cpp
+++ b/src/gpu/vk/GrVkFramebuffer.cpp
@@ -50,7 +50,8 @@
return new GrVkFramebuffer(framebuffer);
}
-void GrVkFramebuffer::freeGPUData(GrVkGpu* gpu) const {
+void GrVkFramebuffer::freeGPUData(GrGpu* gpu) const {
SkASSERT(fFramebuffer);
- GR_VK_CALL(gpu->vkInterface(), DestroyFramebuffer(gpu->device(), fFramebuffer, nullptr));
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ GR_VK_CALL(vkGpu->vkInterface(), DestroyFramebuffer(vkGpu->device(), fFramebuffer, nullptr));
}
diff --git a/src/gpu/vk/GrVkFramebuffer.h b/src/gpu/vk/GrVkFramebuffer.h
index 1a97d57..eff8be5 100644
--- a/src/gpu/vk/GrVkFramebuffer.h
+++ b/src/gpu/vk/GrVkFramebuffer.h
@@ -10,13 +10,13 @@
#include "include/gpu/GrTypes.h"
#include "include/gpu/vk/GrVkTypes.h"
-#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/GrManagedResource.h"
class GrVkGpu;
class GrVkImageView;
class GrVkRenderPass;
-class GrVkFramebuffer : public GrVkResource {
+class GrVkFramebuffer : public GrManagedResource {
public:
static GrVkFramebuffer* Create(GrVkGpu* gpu,
int width, int height,
@@ -26,7 +26,7 @@
VkFramebuffer framebuffer() const { return fFramebuffer; }
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkFramebuffer: %d (%d refs)\n", fFramebuffer, this->getRefCnt());
}
@@ -38,11 +38,11 @@
GrVkFramebuffer(const GrVkFramebuffer&);
GrVkFramebuffer& operator=(const GrVkFramebuffer&);
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
VkFramebuffer fFramebuffer;
- typedef GrVkResource INHERITED;
+ typedef GrManagedResource INHERITED;
};
#endif
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 17f8d7d..c02d868 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -2077,7 +2077,7 @@
////////////////////////////////////////////////////////////////////////////////
-void GrVkGpu::addBufferMemoryBarrier(const GrVkResource* resource,
+void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion,
@@ -2093,7 +2093,7 @@
barrier);
}
-void GrVkGpu::addImageMemoryBarrier(const GrVkResource* resource,
+void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion,
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index fe6375e..dc62f77 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -100,12 +100,12 @@
const GrOpsRenderPass::StencilLoadAndStoreInfo&,
const SkTArray<GrSurfaceProxy*, true>& sampledProxies) override;
- void addBufferMemoryBarrier(const GrVkResource*,
+ void addBufferMemoryBarrier(const GrManagedResource*,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion,
VkBufferMemoryBarrier* barrier) const;
- void addImageMemoryBarrier(const GrVkResource*,
+ void addImageMemoryBarrier(const GrManagedResource*,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion,
diff --git a/src/gpu/vk/GrVkImage.cpp b/src/gpu/vk/GrVkImage.cpp
index facc5f1..8e10ad0 100644
--- a/src/gpu/vk/GrVkImage.cpp
+++ b/src/gpu/vk/GrVkImage.cpp
@@ -5,7 +5,6 @@
* found in the LICENSE file.
*/
-#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkMemory.h"
@@ -271,47 +270,15 @@
fResource->setRelease(std::move(releaseHelper));
}
-void GrVkImage::Resource::freeGPUData(GrVkGpu* gpu) const {
+void GrVkImage::Resource::freeGPUData(GrGpu* gpu) const {
this->invokeReleaseProc();
- VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ VK_CALL(vkGpu, DestroyImage(vkGpu->device(), fImage, nullptr));
bool isLinear = (VK_IMAGE_TILING_LINEAR == fImageTiling);
- GrVkMemory::FreeImageMemory(gpu, isLinear, fAlloc);
+ GrVkMemory::FreeImageMemory(vkGpu, isLinear, fAlloc);
}
-void GrVkImage::Resource::addIdleProc(GrVkTexture* owningTexture,
- sk_sp<GrRefCntedCallback> idleProc) const {
- SkASSERT(!fOwningTexture || fOwningTexture == owningTexture);
- fOwningTexture = owningTexture;
- fIdleProcs.push_back(std::move(idleProc));
-}
-
-int GrVkImage::Resource::idleProcCnt() const { return fIdleProcs.count(); }
-
-sk_sp<GrRefCntedCallback> GrVkImage::Resource::idleProc(int i) const { return fIdleProcs[i]; }
-
-void GrVkImage::Resource::resetIdleProcs() const { fIdleProcs.reset(); }
-
-void GrVkImage::Resource::removeOwningTexture() const { fOwningTexture = nullptr; }
-
-void GrVkImage::Resource::notifyAddedToCommandBuffer() const { ++fNumCommandBufferOwners; }
-
-void GrVkImage::Resource::notifyRemovedFromCommandBuffer() const {
- SkASSERT(fNumCommandBufferOwners);
- if (--fNumCommandBufferOwners || !fIdleProcs.count()) {
- return;
- }
- if (fOwningTexture) {
- if (fOwningTexture->resourcePriv().hasRef()) {
- // Wait for the texture to become idle in the cache to call the procs.
- return;
- }
- fOwningTexture->callIdleProcsOnBehalfOfResource();
- } else {
- fIdleProcs.reset();
- }
-}
-
-void GrVkImage::BorrowedResource::freeGPUData(GrVkGpu* gpu) const {
+void GrVkImage::BorrowedResource::freeGPUData(GrGpu* gpu) const {
this->invokeReleaseProc();
}
diff --git a/src/gpu/vk/GrVkImage.h b/src/gpu/vk/GrVkImage.h
index 081e3c7..5ac10c3 100644
--- a/src/gpu/vk/GrVkImage.h
+++ b/src/gpu/vk/GrVkImage.h
@@ -12,9 +12,9 @@
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "include/private/GrTypesPriv.h"
+#include "src/gpu/GrManagedResource.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/vk/GrVkImageLayout.h"
-#include "src/gpu/vk/GrVkResource.h"
class GrVkGpu;
class GrVkTexture;
@@ -165,7 +165,7 @@
bool fIsBorrowed;
private:
- class Resource : public GrVkResource {
+ class Resource : public GrTextureResource {
public:
Resource()
: fImage(VK_NULL_HANDLE) {
@@ -178,61 +178,22 @@
, fAlloc(alloc)
, fImageTiling(tiling) {}
- ~Resource() override {
- SkASSERT(!fReleaseHelper);
- }
+ ~Resource() override {}
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkImage: %d (%d refs)\n", fImage, this->getRefCnt());
}
#endif
- void setRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
- fReleaseHelper = std::move(releaseHelper);
- }
-
- /**
- * These are used to coordinate calling the "finished" idle procs between the GrVkTexture
- * and the Resource. If the GrVkTexture becomes purgeable and if there are no command
- * buffers referring to the Resource then it calls the procs. Otherwise, the Resource calls
- * them when the last command buffer reference goes away and the GrVkTexture is purgeable.
- */
- void addIdleProc(GrVkTexture*, sk_sp<GrRefCntedCallback>) const;
- int idleProcCnt() const;
- sk_sp<GrRefCntedCallback> idleProc(int) const;
- void resetIdleProcs() const;
- void removeOwningTexture() const;
-
- /**
- * We track how many outstanding references this Resource has in command buffers and
- * when the count reaches zero we call the idle proc.
- */
- void notifyAddedToCommandBuffer() const override;
- void notifyRemovedFromCommandBuffer() const override;
- bool isOwnedByCommandBuffer() const { return fNumCommandBufferOwners > 0; }
-
- protected:
- mutable sk_sp<GrRefCntedCallback> fReleaseHelper;
-
- void invokeReleaseProc() const {
- if (fReleaseHelper) {
- // Depending on the ref count of fReleaseHelper this may or may not actually trigger
- // the ReleaseProc to be called.
- fReleaseHelper.reset();
- }
- }
private:
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
VkImage fImage;
GrVkAlloc fAlloc;
VkImageTiling fImageTiling;
- mutable int fNumCommandBufferOwners = 0;
- mutable SkTArray<sk_sp<GrRefCntedCallback>> fIdleProcs;
- mutable GrVkTexture* fOwningTexture = nullptr;
- typedef GrVkResource INHERITED;
+ typedef GrTextureResource INHERITED;
};
// for wrapped textures
@@ -242,7 +203,7 @@
: Resource(image, alloc, tiling) {
}
private:
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
};
Resource* fResource;
diff --git a/src/gpu/vk/GrVkImageView.cpp b/src/gpu/vk/GrVkImageView.cpp
index 0298bfa..e65bcc5 100644
--- a/src/gpu/vk/GrVkImageView.cpp
+++ b/src/gpu/vk/GrVkImageView.cpp
@@ -61,11 +61,12 @@
return new GrVkImageView(imageView, ycbcrConversion);
}
-void GrVkImageView::freeGPUData(GrVkGpu* gpu) const {
- GR_VK_CALL(gpu->vkInterface(), DestroyImageView(gpu->device(), fImageView, nullptr));
+void GrVkImageView::freeGPUData(GrGpu* gpu) const {
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ GR_VK_CALL(vkGpu->vkInterface(), DestroyImageView(vkGpu->device(), fImageView, nullptr));
if (fYcbcrConversion) {
- fYcbcrConversion->unref(gpu);
+ fYcbcrConversion->unref(vkGpu);
}
}
diff --git a/src/gpu/vk/GrVkImageView.h b/src/gpu/vk/GrVkImageView.h
index 2ca52fa..1e88ea8 100644
--- a/src/gpu/vk/GrVkImageView.h
+++ b/src/gpu/vk/GrVkImageView.h
@@ -10,12 +10,12 @@
#include "include/gpu/GrTypes.h"
#include "include/gpu/vk/GrVkTypes.h"
-#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/GrManagedResource.h"
class GrVkSamplerYcbcrConversion;
struct GrVkYcbcrConversionInfo;
-class GrVkImageView : public GrVkResource {
+class GrVkImageView : public GrManagedResource {
public:
enum Type {
kColor_Type,
@@ -28,7 +28,7 @@
VkImageView imageView() const { return fImageView; }
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkImageView: %d (%d refs)\n", fImageView, this->getRefCnt());
}
@@ -41,12 +41,12 @@
GrVkImageView(const GrVkImageView&);
GrVkImageView& operator=(const GrVkImageView&);
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
VkImageView fImageView;
GrVkSamplerYcbcrConversion* fYcbcrConversion;
- typedef GrVkResource INHERITED;
+ typedef GrManagedResource INHERITED;
};
#endif
diff --git a/src/gpu/vk/GrVkOpsRenderPass.cpp b/src/gpu/vk/GrVkOpsRenderPass.cpp
index 4be047a..cce4cbd 100644
--- a/src/gpu/vk/GrVkOpsRenderPass.cpp
+++ b/src/gpu/vk/GrVkOpsRenderPass.cpp
@@ -191,7 +191,7 @@
if (this->wrapsSecondaryCommandBuffer()) {
// We pass the ownership of the GrVkSecondaryCommandBuffer to the special wrapped
// GrVkRenderTarget since it's lifetime matches the lifetime we need to keep the
- // GrVkResources on the GrVkSecondaryCommandBuffer alive.
+ // GrManagedResources on the GrVkSecondaryCommandBuffer alive.
static_cast<GrVkRenderTarget*>(fRenderTarget)->addWrappedGrSecondaryCommandBuffer(
std::move(fCurrentSecondaryCommandBuffer));
return;
diff --git a/src/gpu/vk/GrVkPipeline.cpp b/src/gpu/vk/GrVkPipeline.cpp
index 24b1f17..dac1407 100644
--- a/src/gpu/vk/GrVkPipeline.cpp
+++ b/src/gpu/vk/GrVkPipeline.cpp
@@ -655,9 +655,10 @@
return new GrVkPipeline(vkPipeline, layout);
}
-void GrVkPipeline::freeGPUData(GrVkGpu* gpu) const {
- GR_VK_CALL(gpu->vkInterface(), DestroyPipeline(gpu->device(), fPipeline, nullptr));
- GR_VK_CALL(gpu->vkInterface(), DestroyPipelineLayout(gpu->device(), fPipelineLayout, nullptr));
+void GrVkPipeline::freeGPUData(GrGpu* gpu) const {
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ GR_VK_CALL(vkGpu->vkInterface(), DestroyPipeline(vkGpu->device(), fPipeline, nullptr));
+ GR_VK_CALL(vkGpu->vkInterface(), DestroyPipelineLayout(vkGpu->device(), fPipelineLayout, nullptr));
}
void GrVkPipeline::SetDynamicScissorRectState(GrVkGpu* gpu,
diff --git a/src/gpu/vk/GrVkPipeline.h b/src/gpu/vk/GrVkPipeline.h
index 34b835b..2a50bb3 100644
--- a/src/gpu/vk/GrVkPipeline.h
+++ b/src/gpu/vk/GrVkPipeline.h
@@ -10,7 +10,7 @@
#include "include/gpu/vk/GrVkTypes.h"
#include "include/private/GrTypesPriv.h"
-#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/GrManagedResource.h"
class GrPipeline;
class GrPrimitiveProcessor;
@@ -22,7 +22,7 @@
class GrVkRenderPass;
struct SkIRect;
-class GrVkPipeline : public GrVkResource {
+class GrVkPipeline : public GrManagedResource {
public:
static GrVkPipeline* Create(GrVkGpu*,
const GrProgramInfo&,
@@ -42,7 +42,7 @@
const GrSwizzle& outputSwizzle,
const GrXferProcessor&);
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkPipeline: %d (%d refs)\n", fPipeline, this->getRefCnt());
}
@@ -56,9 +56,9 @@
VkPipelineLayout fPipelineLayout;
private:
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
- typedef GrVkResource INHERITED;
+ typedef GrManagedResource INHERITED;
};
#endif
diff --git a/src/gpu/vk/GrVkPipelineState.h b/src/gpu/vk/GrVkPipelineState.h
index fbf50bd..8862305 100644
--- a/src/gpu/vk/GrVkPipelineState.h
+++ b/src/gpu/vk/GrVkPipelineState.h
@@ -110,7 +110,7 @@
// Helper for setData() that sets the view matrix and loads the render target height uniform
void setRenderTargetState(const GrRenderTarget*, GrSurfaceOrigin);
- // GrVkResources
+ // GrManagedResources
GrVkPipeline* fPipeline;
const GrVkDescriptorSet* fUniformDescriptorSet;
diff --git a/src/gpu/vk/GrVkRenderPass.cpp b/src/gpu/vk/GrVkRenderPass.cpp
index 6ee7f94..e059d79 100644
--- a/src/gpu/vk/GrVkRenderPass.cpp
+++ b/src/gpu/vk/GrVkRenderPass.cpp
@@ -182,9 +182,10 @@
, fClearValueCount(clearValueCount) {
}
-void GrVkRenderPass::freeGPUData(GrVkGpu* gpu) const {
+void GrVkRenderPass::freeGPUData(GrGpu* gpu) const {
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
if (!(fAttachmentFlags & kExternal_AttachmentFlag)) {
- GR_VK_CALL(gpu->vkInterface(), DestroyRenderPass(gpu->device(), fRenderPass, nullptr));
+ GR_VK_CALL(vkGpu->vkInterface(), DestroyRenderPass(vkGpu->device(), fRenderPass, nullptr));
}
}
diff --git a/src/gpu/vk/GrVkRenderPass.h b/src/gpu/vk/GrVkRenderPass.h
index 1474c7b..68baeb4 100644
--- a/src/gpu/vk/GrVkRenderPass.h
+++ b/src/gpu/vk/GrVkRenderPass.h
@@ -10,13 +10,13 @@
#include "include/gpu/GrTypes.h"
#include "include/gpu/vk/GrVkTypes.h"
-#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/GrManagedResource.h"
class GrProcessorKeyBuilder;
class GrVkGpu;
class GrVkRenderTarget;
-class GrVkRenderPass : public GrVkResource {
+class GrVkRenderPass : public GrManagedResource {
public:
struct LoadStoreOps {
VkAttachmentLoadOp fLoadOp;
@@ -118,7 +118,7 @@
void genKey(GrProcessorKeyBuilder* b) const;
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkRenderPass: %d (%d refs)\n", fRenderPass, this->getRefCnt());
}
@@ -136,7 +136,7 @@
bool isCompatible(const AttachmentsDescriptor&, const AttachmentFlags&) const;
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
VkRenderPass fRenderPass;
AttachmentFlags fAttachmentFlags;
@@ -146,7 +146,7 @@
// For internally created render passes we assume the color attachment index is always 0.
uint32_t fColorAttachmentIndex = 0;
- typedef GrVkResource INHERITED;
+ typedef GrManagedResource INHERITED;
};
GR_MAKE_BITFIELD_OPS(GrVkRenderPass::AttachmentFlags);
diff --git a/src/gpu/vk/GrVkRenderTarget.cpp b/src/gpu/vk/GrVkRenderTarget.cpp
index cf136f3..b93c3c3 100644
--- a/src/gpu/vk/GrVkRenderTarget.cpp
+++ b/src/gpu/vk/GrVkRenderTarget.cpp
@@ -380,7 +380,7 @@
this->grVkImageLayout());
}
-const GrVkResource* GrVkRenderTarget::stencilImageResource() const {
+const GrManagedResource* GrVkRenderTarget::stencilImageResource() const {
SkASSERT(!this->wrapsSecondaryCommandBuffer());
const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
if (stencil) {
diff --git a/src/gpu/vk/GrVkRenderTarget.h b/src/gpu/vk/GrVkRenderTarget.h
index 224c40c..daae1a0 100644
--- a/src/gpu/vk/GrVkRenderTarget.h
+++ b/src/gpu/vk/GrVkRenderTarget.h
@@ -45,7 +45,7 @@
const GrVkFramebuffer* getFramebuffer();
const GrVkImageView* colorAttachmentView() const { return fColorAttachmentView; }
- const GrVkResource* msaaImageResource() const {
+ const GrManagedResource* msaaImageResource() const {
if (fMSAAImage) {
return fMSAAImage->fResource;
}
@@ -53,7 +53,7 @@
}
GrVkImage* msaaImage() { return fMSAAImage.get(); }
const GrVkImageView* resolveAttachmentView() const { return fResolveAttachmentView; }
- const GrVkResource* stencilImageResource() const;
+ const GrManagedResource* stencilImageResource() const;
const GrVkImageView* stencilAttachmentView() const;
const GrVkRenderPass* getSimpleRenderPass();
@@ -184,15 +184,15 @@
// VkCommandBuffer and not VK_NULL_HANDLE. In this case the render target will not be backed by
// an actual VkImage and will thus be limited in terms of what it can be used for.
VkCommandBuffer fSecondaryCommandBuffer = VK_NULL_HANDLE;
- // When we wrap a secondary command buffer, we will record GrVkResources onto it which need to
- // be kept alive till the command buffer gets submitted and the GPU has finished. However, in
+ // When we wrap a secondary command buffer, we will record GrManagedResources onto it which need
+ // to be kept alive till the command buffer gets submitted and the GPU has finished. However, in
// the wrapped case, we don't know when the command buffer gets submitted and when it is
// finished on the GPU since the client is in charge of that. However, we do require that the
// client keeps the GrVkSecondaryCBDrawContext alive and call releaseResources on it once the
// GPU is finished all the work. Thus we can use this to manage the lifetime of our
// GrVkSecondaryCommandBuffers. By storing them on the GrVkRenderTarget, which is owned by the
- // SkGpuDevice on the GrVkSecondaryCBDrawContext, we assure that the GrVkResources held by the
- // GrVkSecondaryCommandBuffer don't get deleted before they are allowed to.
+ // SkGpuDevice on the GrVkSecondaryCBDrawContext, we assure that the GrManagedResources held by
+ // the GrVkSecondaryCommandBuffer don't get deleted before they are allowed to.
SkTArray<std::unique_ptr<GrVkCommandBuffer>> fGrSecondaryCommandBuffers;
};
diff --git a/src/gpu/vk/GrVkResource.h b/src/gpu/vk/GrVkResource.h
deleted file mode 100644
index 725e7ee..0000000
--- a/src/gpu/vk/GrVkResource.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrVkResource_DEFINED
-#define GrVkResource_DEFINED
-
-
-#include "include/private/SkMutex.h"
-#include "include/private/SkTHash.h"
-#include "include/utils/SkRandom.h"
-#include <atomic>
-
-class GrVkGpu;
-
-// uncomment to enable tracing of resource refs
-#ifdef SK_DEBUG
-#define SK_TRACE_VK_RESOURCES
-#endif
-
-/** \class GrVkResource
-
- GrVkResource is the base class for Vulkan resources that may be shared by multiple
- objects. When an existing owner wants to share a reference, it calls ref().
- When an owner wants to release its reference, it calls unref(). When the
- shared object's reference count goes to zero as the result of an unref()
- call, its (virtual) destructor is called. It is an error for the
- destructor to be called explicitly (or via the object going out of scope on
- the stack or calling delete) if getRefCnt() > 1.
-
- This is nearly identical to SkRefCntBase. The exceptions are that unref()
- takes a GrVkGpu, and any derived classes must implement freeGPUData().
-*/
-
-class GrVkResource : SkNoncopyable {
-public:
- // Simple refCount tracing, to ensure that everything ref'ed is unref'ed.
-#ifdef SK_TRACE_VK_RESOURCES
- struct Hash {
- uint32_t operator()(const GrVkResource* const& r) const {
- SkASSERT(r);
- return r->fKey;
- }
- };
-
- class Trace {
- public:
- ~Trace() {
- fHashSet.foreach([](const GrVkResource* r) {
- r->dumpInfo();
- });
- SkASSERT(0 == fHashSet.count());
- }
-
- void add(const GrVkResource* r) {
- SkAutoMutexExclusive locked(fLock);
- fHashSet.add(r);
- }
-
- void remove(const GrVkResource* r) {
- SkAutoMutexExclusive locked(fLock);
- fHashSet.remove(r);
- }
-
- private:
- SkMutex fLock;
- SkTHashSet<const GrVkResource*, GrVkResource::Hash> fHashSet SK_GUARDED_BY(fLock);
- };
-
- static std::atomic<uint32_t> fKeyCounter;
-#endif
-
- /** Default construct, initializing the reference count to 1.
- */
- GrVkResource() : fRefCnt(1) {
-#ifdef SK_TRACE_VK_RESOURCES
- fKey = fKeyCounter.fetch_add(+1, std::memory_order_relaxed);
- GetTrace()->add(this);
-#endif
- }
-
- /** Destruct, asserting that the reference count is 1.
- */
- virtual ~GrVkResource() {
-#ifdef SK_DEBUG
- auto count = this->getRefCnt();
- SkASSERTF(count == 1, "fRefCnt was %d", count);
- fRefCnt.store(0); // illegal value, to catch us if we reuse after delete
-#endif
- }
-
-#ifdef SK_DEBUG
- /** Return the reference count. Use only for debugging. */
- int32_t getRefCnt() const { return fRefCnt.load(); }
-#endif
-
- /** May return true if the caller is the only owner.
- * Ensures that all previous owner's actions are complete.
- */
- bool unique() const {
- // The acquire barrier is only really needed if we return true. It
- // prevents code conditioned on the result of unique() from running
- // until previous owners are all totally done calling unref().
- return 1 == fRefCnt.load(std::memory_order_acquire);
- }
-
- /** Increment the reference count.
- Must be balanced by a call to unref() or unrefAndFreeResources().
- */
- void ref() const {
- // No barrier required.
- SkDEBUGCODE(int newRefCount = )fRefCnt.fetch_add(+1, std::memory_order_relaxed);
- SkASSERT(newRefCount >= 1);
- }
-
- /** Decrement the reference count. If the reference count is 1 before the
- decrement, then delete the object. Note that if this is the case, then
- the object needs to have been allocated via new, and not on the stack.
- Any GPU data associated with this resource will be freed before it's deleted.
- */
- void unref(GrVkGpu* gpu) const {
- SkASSERT(gpu);
- // A release here acts in place of all releases we "should" have been doing in ref().
- int newRefCount = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
- SkASSERT(newRefCount >= 0);
- if (newRefCount == 1) {
- // Like unique(), the acquire is only needed on success, to make sure
- // code in internal_dispose() doesn't happen before the decrement.
- this->internal_dispose(gpu);
- }
- }
-
- // Called every time this resource is added to a command buffer.
- virtual void notifyAddedToCommandBuffer() const {}
- // Called every time this resource is removed from a command buffer (typically because
- // the command buffer finished execution on the GPU.)
- virtual void notifyRemovedFromCommandBuffer() const {}
-
-#ifdef SK_DEBUG
- void validate() const {
- SkASSERT(this->getRefCnt() > 0);
- }
-#endif
-
-#ifdef SK_TRACE_VK_RESOURCES
- /** Output a human-readable dump of this resource's information
- */
- virtual void dumpInfo() const = 0;
-#endif
-
-private:
-#ifdef SK_TRACE_VK_RESOURCES
- static Trace* GetTrace() {
- static Trace kTrace;
- return &kTrace;
- }
-#endif
-
- /** Must be implemented by any subclasses.
- * Deletes any Vk data associated with this resource
- */
- virtual void freeGPUData(GrVkGpu* gpu) const = 0;
-
- /**
- * Called when the ref count goes to 0. Will free Vk resources.
- */
- void internal_dispose(GrVkGpu* gpu) const {
- this->freeGPUData(gpu);
-#ifdef SK_TRACE_VK_RESOURCES
- GetTrace()->remove(this);
-#endif
-
-#ifdef SK_DEBUG
- SkASSERT(0 == this->getRefCnt());
- fRefCnt.store(1);
-#endif
- delete this;
- }
-
- mutable std::atomic<int32_t> fRefCnt;
-#ifdef SK_TRACE_VK_RESOURCES
- uint32_t fKey;
-#endif
-
- typedef SkNoncopyable INHERITED;
-};
-
-// This subclass allows for recycling
-class GrVkRecycledResource : public GrVkResource {
-public:
- // When recycle is called and there is only one ref left on the resource, we will signal that
- // the resource can be recycled for reuse. If the sublass (or whoever is managing this resource)
- // decides not to recycle the objects, it is their responsibility to call unref on the object.
- void recycle(GrVkGpu* gpu) const {
- if (this->unique()) {
- this->onRecycle(gpu);
- } else {
- this->unref(gpu);
- }
- }
-
-private:
- virtual void onRecycle(GrVkGpu* gpu) const = 0;
-};
-
-#endif
diff --git a/src/gpu/vk/GrVkResourceProvider.cpp b/src/gpu/vk/GrVkResourceProvider.cpp
index 2166f16..198a946 100644
--- a/src/gpu/vk/GrVkResourceProvider.cpp
+++ b/src/gpu/vk/GrVkResourceProvider.cpp
@@ -18,8 +18,8 @@
#include "src/gpu/vk/GrVkUniformBuffer.h"
#include "src/gpu/vk/GrVkUtil.h"
-#ifdef SK_TRACE_VK_RESOURCES
-std::atomic<uint32_t> GrVkResource::fKeyCounter{0};
+#ifdef SK_TRACE_MANAGED_RESOURCES
+std::atomic<uint32_t> GrManagedResource::fKeyCounter{0};
#endif
GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
@@ -357,8 +357,8 @@
}
}
-const GrVkResource* GrVkResourceProvider::findOrCreateStandardUniformBufferResource() {
- const GrVkResource* resource = nullptr;
+const GrManagedResource* GrVkResourceProvider::findOrCreateStandardUniformBufferResource() {
+ const GrManagedResource* resource = nullptr;
int count = fAvailableUniformBufferResources.count();
if (count > 0) {
resource = fAvailableUniformBufferResources[count - 1];
@@ -369,7 +369,7 @@
return resource;
}
-void GrVkResourceProvider::recycleStandardUniformBufferResource(const GrVkResource* resource) {
+void GrVkResourceProvider::recycleStandardUniformBufferResource(const GrManagedResource* resource) {
fAvailableUniformBufferResources.push_back(resource);
}
diff --git a/src/gpu/vk/GrVkResourceProvider.h b/src/gpu/vk/GrVkResourceProvider.h
index 7e0ffa7..b45f0e4 100644
--- a/src/gpu/vk/GrVkResourceProvider.h
+++ b/src/gpu/vk/GrVkResourceProvider.h
@@ -13,13 +13,13 @@
#include "src/core/SkLRUCache.h"
#include "src/core/SkTDynamicHash.h"
#include "src/core/SkTInternalLList.h"
+#include "src/gpu/GrManagedResource.h"
#include "src/gpu/GrProgramDesc.h"
#include "src/gpu/GrResourceHandle.h"
#include "src/gpu/vk/GrVkDescriptorPool.h"
#include "src/gpu/vk/GrVkDescriptorSetManager.h"
#include "src/gpu/vk/GrVkPipelineStateBuilder.h"
#include "src/gpu/vk/GrVkRenderPass.h"
-#include "src/gpu/vk/GrVkResource.h"
#include "src/gpu/vk/GrVkSampler.h"
#include "src/gpu/vk/GrVkSamplerYcbcrConversion.h"
#include "src/gpu/vk/GrVkUtil.h"
@@ -148,11 +148,11 @@
// Creates or finds free uniform buffer resources of size GrVkUniformBuffer::kStandardSize.
// Anything larger will need to be created and released by the client.
- const GrVkResource* findOrCreateStandardUniformBufferResource();
+ const GrManagedResource* findOrCreateStandardUniformBufferResource();
// Signals that the resource passed to it (which should be a uniform buffer resource)
// can be reused by the next uniform buffer resource request.
- void recycleStandardUniformBufferResource(const GrVkResource*);
+ void recycleStandardUniformBufferResource(const GrManagedResource*);
void storePipelineCacheData();
@@ -257,7 +257,7 @@
SkSTArray<4, GrVkCommandPool*, true> fAvailableCommandPools;
// Array of available uniform buffer resources
- SkSTArray<16, const GrVkResource*, true> fAvailableUniformBufferResources;
+ SkSTArray<16, const GrManagedResource*, true> fAvailableUniformBufferResources;
// Stores GrVkSampler objects that we've already created so we can reuse them across multiple
// GrVkPipelineStates
diff --git a/src/gpu/vk/GrVkSampler.cpp b/src/gpu/vk/GrVkSampler.cpp
index 75c40b7..375d21a 100644
--- a/src/gpu/vk/GrVkSampler.cpp
+++ b/src/gpu/vk/GrVkSampler.cpp
@@ -112,9 +112,10 @@
return new GrVkSampler(sampler, ycbcrConversion, GenerateKey(samplerState, ycbcrInfo));
}
-void GrVkSampler::freeGPUData(GrVkGpu* gpu) const {
+void GrVkSampler::freeGPUData(GrGpu* gpu) const {
SkASSERT(fSampler);
- GR_VK_CALL(gpu->vkInterface(), DestroySampler(gpu->device(), fSampler, nullptr));
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ GR_VK_CALL(vkGpu->vkInterface(), DestroySampler(vkGpu->device(), fSampler, nullptr));
if (fYcbcrConversion) {
fYcbcrConversion->unref(gpu);
}
diff --git a/src/gpu/vk/GrVkSampler.h b/src/gpu/vk/GrVkSampler.h
index 2add493..151e092 100644
--- a/src/gpu/vk/GrVkSampler.h
+++ b/src/gpu/vk/GrVkSampler.h
@@ -10,14 +10,14 @@
#include "include/gpu/vk/GrVkTypes.h"
#include "src/core/SkOpts.h"
-#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/GrManagedResource.h"
#include "src/gpu/vk/GrVkSamplerYcbcrConversion.h"
#include <atomic>
class GrSamplerState;
class GrVkGpu;
-class GrVkSampler : public GrVkResource {
+class GrVkSampler : public GrManagedResource {
public:
static GrVkSampler* Create(GrVkGpu* gpu, GrSamplerState, const GrVkYcbcrConversionInfo&);
@@ -51,7 +51,7 @@
uint32_t uniqueID() const { return fUniqueID; }
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkSampler: %d (%d refs)\n", fSampler, this->getRefCnt());
}
@@ -65,7 +65,7 @@
, fKey(key)
, fUniqueID(GenID()) {}
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
static uint32_t GenID() {
static std::atomic<uint32_t> nextID{1};
@@ -81,7 +81,7 @@
Key fKey;
uint32_t fUniqueID;
- typedef GrVkResource INHERITED;
+ typedef GrManagedResource INHERITED;
};
#endif
diff --git a/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp b/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp
index dda7ae6..845225e 100644
--- a/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp
+++ b/src/gpu/vk/GrVkSamplerYcbcrConversion.cpp
@@ -81,10 +81,11 @@
return new GrVkSamplerYcbcrConversion(conversion, GenerateKey(info));
}
-void GrVkSamplerYcbcrConversion::freeGPUData(GrVkGpu* gpu) const {
+void GrVkSamplerYcbcrConversion::freeGPUData(GrGpu* gpu) const {
SkASSERT(fYcbcrConversion);
- GR_VK_CALL(gpu->vkInterface(), DestroySamplerYcbcrConversion(gpu->device(), fYcbcrConversion,
- nullptr));
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ GR_VK_CALL(vkGpu->vkInterface(), DestroySamplerYcbcrConversion(vkGpu->device(),
+ fYcbcrConversion, nullptr));
}
GrVkSamplerYcbcrConversion::Key GrVkSamplerYcbcrConversion::GenerateKey(
diff --git a/src/gpu/vk/GrVkSamplerYcbcrConversion.h b/src/gpu/vk/GrVkSamplerYcbcrConversion.h
index 5ec3b3d..6f3f1af 100644
--- a/src/gpu/vk/GrVkSamplerYcbcrConversion.h
+++ b/src/gpu/vk/GrVkSamplerYcbcrConversion.h
@@ -8,14 +8,14 @@
#ifndef GrVkSamplerYcbcrConverison_DEFINED
#define GrVkSamplerYcbcrConverison_DEFINED
-#include "src/gpu/vk/GrVkResource.h"
+#include "src/gpu/GrManagedResource.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "src/core/SkOpts.h"
class GrVkGpu;
-class GrVkSamplerYcbcrConversion : public GrVkResource {
+class GrVkSamplerYcbcrConversion : public GrManagedResource {
public:
static GrVkSamplerYcbcrConversion* Create(GrVkGpu* gpu, const GrVkYcbcrConversionInfo&);
@@ -51,7 +51,7 @@
return SkOpts::hash(reinterpret_cast<const uint32_t*>(&key), sizeof(Key));
}
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkSamplerYcbcrConversion: %d (%d refs)\n", fYcbcrConversion, this->getRefCnt());
}
@@ -63,12 +63,12 @@
, fYcbcrConversion(ycbcrConversion)
, fKey(key) {}
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
VkSamplerYcbcrConversion fYcbcrConversion;
Key fKey;
- typedef GrVkResource INHERITED;
+ typedef GrManagedResource INHERITED;
};
#endif
diff --git a/src/gpu/vk/GrVkSemaphore.cpp b/src/gpu/vk/GrVkSemaphore.cpp
index 191b6a3..95c6642 100644
--- a/src/gpu/vk/GrVkSemaphore.cpp
+++ b/src/gpu/vk/GrVkSemaphore.cpp
@@ -59,10 +59,11 @@
}
}
-void GrVkSemaphore::Resource::freeGPUData(GrVkGpu* gpu) const {
+void GrVkSemaphore::Resource::freeGPUData(GrGpu* gpu) const {
if (fIsOwned) {
- GR_VK_CALL(gpu->vkInterface(),
- DestroySemaphore(gpu->device(), fSemaphore, nullptr));
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ GR_VK_CALL(vkGpu->vkInterface(),
+ DestroySemaphore(vkGpu->device(), fSemaphore, nullptr));
}
}
diff --git a/src/gpu/vk/GrVkSemaphore.h b/src/gpu/vk/GrVkSemaphore.h
index b18db6d..4495ed4 100644
--- a/src/gpu/vk/GrVkSemaphore.h
+++ b/src/gpu/vk/GrVkSemaphore.h
@@ -11,8 +11,8 @@
#include "src/gpu/GrSemaphore.h"
#include "include/gpu/vk/GrVkTypes.h"
+#include "src/gpu/GrManagedResource.h"
#include "src/gpu/GrResourceProvider.h"
-#include "src/gpu/vk/GrVkResource.h"
class GrBackendSemaphore;
class GrVkGpu;
@@ -32,7 +32,7 @@
GrBackendSemaphore backendSemaphore() const override;
- class Resource : public GrVkResource {
+ class Resource : public GrManagedResource {
public:
Resource(VkSemaphore semaphore, bool prohibitSignal, bool prohibitWait, bool isOwned)
: INHERITED()
@@ -63,20 +63,20 @@
fIsOwned = true;
}
-#ifdef SK_TRACE_VK_RESOURCES
+#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkSemaphore: %d (%d refs)\n", fSemaphore, this->getRefCnt());
}
#endif
private:
- void freeGPUData(GrVkGpu* gpu) const override;
+ void freeGPUData(GrGpu* gpu) const override;
VkSemaphore fSemaphore;
bool fHasBeenSubmittedToQueueForSignal;
bool fHasBeenSubmittedToQueueForWait;
bool fIsOwned;
- typedef GrVkResource INHERITED;
+ typedef GrManagedResource INHERITED;
};
Resource* getResource() { return fResource; }
diff --git a/src/gpu/vk/GrVkStencilAttachment.h b/src/gpu/vk/GrVkStencilAttachment.h
index 568e0e5..5abaa26 100644
--- a/src/gpu/vk/GrVkStencilAttachment.h
+++ b/src/gpu/vk/GrVkStencilAttachment.h
@@ -29,7 +29,7 @@
~GrVkStencilAttachment() override;
- const GrVkResource* imageResource() const { return this->resource(); }
+ const GrManagedResource* imageResource() const { return this->resource(); }
const GrVkImageView* stencilView() const { return fStencilView; }
VkFormat vkFormat() const { return fFormat.fInternalFormat; }
diff --git a/src/gpu/vk/GrVkTexture.cpp b/src/gpu/vk/GrVkTexture.cpp
index ca8a2ce..1815636 100644
--- a/src/gpu/vk/GrVkTexture.cpp
+++ b/src/gpu/vk/GrVkTexture.cpp
@@ -136,10 +136,10 @@
}
void GrVkTexture::onRelease() {
- // We're about to be severed from our GrVkResource. If there are "finish" idle procs we have to
- // decide who will handle them. If the resource is still tied to a command buffer we let it
- // handle them. Otherwise, we handle them.
- if (this->hasResource() && this->resource()->isOwnedByCommandBuffer()) {
+ // We're about to be severed from our GrManagedResource. If there are "finish" idle procs we
+ // have to decide who will handle them. If the resource is still tied to a command buffer we let
+ // it handle them. Otherwise, we handle them.
+ if (this->hasResource() && this->resource()->isQueuedForWorkOnGpu()) {
this->removeFinishIdleProcs();
}
@@ -170,10 +170,10 @@
};
void GrVkTexture::onAbandon() {
- // We're about to be severed from our GrVkResource. If there are "finish" idle procs we have to
- // decide who will handle them. If the resource is still tied to a command buffer we let it
- // handle them. Otherwise, we handle them.
- if (this->hasResource() && this->resource()->isOwnedByCommandBuffer()) {
+ // We're about to be severed from our GrManagedResource. If there are "finish" idle procs we
+ // have to decide who will handle them. If the resource is still tied to a command buffer we let
+ // it handle them. Otherwise, we handle them.
+ if (this->hasResource() && this->resource()->isQueuedForWorkOnGpu()) {
this->removeFinishIdleProcs();
}
@@ -233,7 +233,7 @@
// This is called when the GrTexture is purgeable. However, we need to check whether the
// Resource is still owned by any command buffers. If it is then it will call the proc.
auto* resource = this->hasResource() ? this->resource() : nullptr;
- bool callFinishProcs = !resource || !resource->isOwnedByCommandBuffer();
+ bool callFinishProcs = !resource || !resource->isQueuedForWorkOnGpu();
if (callFinishProcs) {
// Everything must go!
fIdleProcs.reset();
diff --git a/src/gpu/vk/GrVkTexture.h b/src/gpu/vk/GrVkTexture.h
index 5b122cc..6a13f5a 100644
--- a/src/gpu/vk/GrVkTexture.h
+++ b/src/gpu/vk/GrVkTexture.h
@@ -46,7 +46,7 @@
const GrVkImageView* textureView();
void addIdleProc(sk_sp<GrRefCntedCallback>, IdleState) override;
- void callIdleProcsOnBehalfOfResource();
+ void callIdleProcsOnBehalfOfResource() override;
// For each GrVkTexture, there is a cache of GrVkDescriptorSets which only contain a single
// texture/sampler descriptor. If there is a cached descriptor set that matches the passed in
diff --git a/src/gpu/vk/GrVkUniformBuffer.cpp b/src/gpu/vk/GrVkUniformBuffer.cpp
index d5a4f9d..2626fa9 100644
--- a/src/gpu/vk/GrVkUniformBuffer.cpp
+++ b/src/gpu/vk/GrVkUniformBuffer.cpp
@@ -14,7 +14,7 @@
if (0 == size) {
return nullptr;
}
- const GrVkResource* resource = nullptr;
+ const GrManagedResource* resource = nullptr;
if (size <= GrVkUniformBuffer::kStandardSize) {
resource = gpu->resourceProvider().findOrCreateStandardUniformBufferResource();
} else {
@@ -39,7 +39,7 @@
}
// We implement our own creation function for special buffer resource type
-const GrVkResource* GrVkUniformBuffer::CreateResource(GrVkGpu* gpu, size_t size) {
+const GrManagedResource* GrVkUniformBuffer::CreateResource(GrVkGpu* gpu, size_t size) {
if (0 == size) {
return nullptr;
}
@@ -72,7 +72,7 @@
return nullptr;
}
- const GrVkResource* resource = new GrVkUniformBuffer::Resource(buffer, alloc);
+ const GrManagedResource* resource = new GrVkUniformBuffer::Resource(buffer, alloc);
if (!resource) {
VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
GrVkMemory::FreeBufferMemory(gpu, kUniform_Type, alloc);
@@ -84,7 +84,7 @@
const GrVkBuffer::Resource* GrVkUniformBuffer::createResource(GrVkGpu* gpu,
const GrVkBuffer::Desc& descriptor) {
- const GrVkResource* vkResource;
+ const GrManagedResource* vkResource;
if (descriptor.fSizeInBytes <= GrVkUniformBuffer::kStandardSize) {
GrVkResourceProvider& provider = gpu->resourceProvider();
vkResource = provider.findOrCreateStandardUniformBufferResource();
@@ -94,9 +94,10 @@
return (const GrVkBuffer::Resource*) vkResource;
}
-void GrVkUniformBuffer::Resource::onRecycle(GrVkGpu* gpu) const {
+void GrVkUniformBuffer::Resource::onRecycle(GrGpu* gpu) const {
if (fAlloc.fSize <= GrVkUniformBuffer::kStandardSize) {
- gpu->resourceProvider().recycleStandardUniformBufferResource(this);
+ GrVkGpu* vkGpu = (GrVkGpu*)gpu;
+ vkGpu->resourceProvider().recycleStandardUniformBufferResource(this);
} else {
this->unref(gpu);
}
diff --git a/src/gpu/vk/GrVkUniformBuffer.h b/src/gpu/vk/GrVkUniformBuffer.h
index f971c85..db9350a 100644
--- a/src/gpu/vk/GrVkUniformBuffer.h
+++ b/src/gpu/vk/GrVkUniformBuffer.h
@@ -17,7 +17,7 @@
public:
static GrVkUniformBuffer* Create(GrVkGpu* gpu, size_t size);
- static const GrVkResource* CreateResource(GrVkGpu* gpu, size_t size);
+ static const GrManagedResource* CreateResource(GrVkGpu* gpu, size_t size);
static const size_t kStandardSize = 256;
void* map(GrVkGpu* gpu) {
@@ -40,7 +40,7 @@
Resource(VkBuffer buf, const GrVkAlloc& alloc)
: INHERITED(buf, alloc, kUniform_Type) {}
- void onRecycle(GrVkGpu* gpu) const override;
+ void onRecycle(GrGpu* gpu) const override;
typedef GrVkBuffer::Resource INHERITED;
};