Update workaround for devices with broken vkQueueWaitIdle.
This essentially changes how we implement a workaround we already have
in the code: instead of sleeping at teardown, we wait on the submit
fences of all outstanding command buffers. Unlike the old sleep, which
was debug-only, the new workaround also runs in release builds.
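
Roughly, the new approach boils down to the following pattern (an
illustrative sketch only, assuming a hypothetical helper and fence
list rather than the actual Skia classes):

    #include <vulkan/vulkan.h>
    #include <cstdint>
    #include <vector>

    // After vkQueueWaitIdle, explicitly wait on every outstanding submit
    // fence, since a broken driver can return from the queue wait before
    // the fences are observable as signaled.
    void waitForOutstandingWork(VkDevice device, VkQueue queue,
                                const std::vector<VkFence>& submitFences) {
        vkQueueWaitIdle(queue);
        for (VkFence fence : submitFences) {
            if (fence != VK_NULL_HANDLE) {
                vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
            }
        }
    }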
Change-Id: I53b6539beb3e919497f2a13f583108ec10f1b9af
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/368556
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
diff --git a/src/gpu/vk/GrVkCaps.cpp b/src/gpu/vk/GrVkCaps.cpp
index d81f06e..d4e82a1 100644
--- a/src/gpu/vk/GrVkCaps.cpp
+++ b/src/gpu/vk/GrVkCaps.cpp
@@ -476,11 +476,11 @@
void GrVkCaps::applyDriverCorrectnessWorkarounds(const VkPhysicalDeviceProperties& properties) {
#if defined(SK_BUILD_FOR_WIN)
if (kNvidia_VkVendor == properties.vendorID || kIntel_VkVendor == properties.vendorID) {
- fMustSleepOnTearDown = true;
+ fMustSyncCommandBuffersWithQueue = true;
}
#elif defined(SK_BUILD_FOR_ANDROID)
if (kImagination_VkVendor == properties.vendorID) {
- fMustSleepOnTearDown = true;
+ fMustSyncCommandBuffersWithQueue = true;
}
#endif
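
For reference, the vendor constants checked above compare against the PCI
vendor IDs that VkPhysicalDeviceProperties::vendorID reports. A standalone
check along the same lines could look like this (a sketch with a hypothetical
helper name and standard platform macros, not Skia's build defines):

    #include <vulkan/vulkan.h>

    // Hypothetical standalone form of the vendor check above, using the
    // well-known PCI vendor IDs behind Skia's kNvidia/kIntel/kImagination
    // constants.
    static bool needsQueueWaitIdleWorkaround(const VkPhysicalDeviceProperties& props) {
    #if defined(_WIN32)
        return props.vendorID == 0x10DE ||   // NVIDIA
               props.vendorID == 0x8086;     // Intel
    #elif defined(__ANDROID__)
        return props.vendorID == 0x1010;     // Imagination Technologies
    #else
        return false;
    #endif
    }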
diff --git a/src/gpu/vk/GrVkCaps.h b/src/gpu/vk/GrVkCaps.h
index d049c75..9c9e11e 100644
--- a/src/gpu/vk/GrVkCaps.h
+++ b/src/gpu/vk/GrVkCaps.h
@@ -75,10 +75,11 @@
return SkToBool(FormatInfo::kBlitSrc_Flag & flags);
}
- // Sometimes calls to QueueWaitIdle return before actually signalling the fences
- // on the command buffers even though they have completed. This causes an assert to fire when
- // destroying the command buffers. Therefore we add a sleep to make sure the fence signals.
- bool mustSleepOnTearDown() const { return fMustSleepOnTearDown; }
+ // On some GPUs (Windows Nvidia and Imagination) calls to QueueWaitIdle return before actually
+ // signalling the fences on the command buffers even though they have completed. This causes
+ // issues when we then delete the command buffers. Therefore we will additionally call
+ // vkWaitForFences on each outstanding command buffer to make sure the driver signals the fence.
+ bool mustSyncCommandBuffersWithQueue() const { return fMustSyncCommandBuffersWithQueue; }
// Returns true if we should always make dedicated allocations for VkImages.
bool shouldAlwaysUseDedicatedImageMemory() const {
@@ -345,7 +346,7 @@
SkSTArray<1, GrVkYcbcrConversionInfo> fYcbcrInfos;
- bool fMustSleepOnTearDown = false;
+ bool fMustSyncCommandBuffersWithQueue = false;
bool fShouldAlwaysUseDedicatedImageMemory = false;
bool fAvoidUpdateBuffers = false;
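
As an aside to the comment above: with waitAll set to true and a timeout of
UINT64_MAX, vkWaitForFences blocks until the fence signals and returns
VK_SUCCESS. A bounded-timeout variant (hypothetical helper name, not Skia
code) makes the return values explicit:

    #include <vulkan/vulkan.h>
    #include <cstdint>

    // Wait on a single submit fence with a bounded timeout.
    // Returns true once the fence has signaled.
    static bool waitOnSubmitFence(VkDevice device, VkFence fence, uint64_t timeoutNs) {
        if (fence == VK_NULL_HANDLE) {
            return true;  // nothing was submitted, so there is no fence to wait on
        }
        VkResult result = vkWaitForFences(device, /*fenceCount=*/1, &fence,
                                          /*waitAll=*/VK_TRUE, timeoutNs);
        return result == VK_SUCCESS;  // VK_TIMEOUT means it has not signaled yet
    }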
diff --git a/src/gpu/vk/GrVkCommandBuffer.cpp b/src/gpu/vk/GrVkCommandBuffer.cpp
index a886877..06a19f1 100644
--- a/src/gpu/vk/GrVkCommandBuffer.cpp
+++ b/src/gpu/vk/GrVkCommandBuffer.cpp
@@ -646,7 +646,9 @@
}
void GrVkPrimaryCommandBuffer::forceSync(GrVkGpu* gpu) {
- SkASSERT(fSubmitFence != VK_NULL_HANDLE);
+ if (fSubmitFence == VK_NULL_HANDLE) {
+ return;
+ }
GR_VK_CALL_ERRCHECK(gpu, WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
}
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 350af29..8ee4fcf 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -50,14 +50,6 @@
#include <utility>
-#if !defined(SK_BUILD_FOR_WIN)
-#include <unistd.h>
-#endif // !defined(SK_BUILD_FOR_WIN)
-
-#if defined(SK_BUILD_FOR_WIN) && defined(SK_DEBUG)
-#include "src/core/SkLeanWindows.h"
-#endif
-
#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
@@ -2172,19 +2164,9 @@
void GrVkGpu::finishOutstandingGpuWork() {
VK_CALL(QueueWaitIdle(fQueue));
- // On Windows and Imagination, sometimes calls to QueueWaitIdle return before actually
- // signalling the fences on the command buffers even though they have completed. This causes an
- // assert to fire when destroying the command buffers. Therefore we add a sleep to make sure the
- // fence signals.
- #ifdef SK_DEBUG
- if (this->vkCaps().mustSleepOnTearDown()) {
- #if defined(SK_BUILD_FOR_WIN)
- Sleep(10); // In milliseconds
- #else
- sleep(1); // In seconds
- #endif
- }
- #endif
+ if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
+ fResourceProvider.forceSyncAllCommandBuffers();
+ }
}
void GrVkGpu::onReportSubmitHistograms() {
diff --git a/src/gpu/vk/GrVkResourceProvider.cpp b/src/gpu/vk/GrVkResourceProvider.cpp
index b8a1c7c..bcf2768 100644
--- a/src/gpu/vk/GrVkResourceProvider.cpp
+++ b/src/gpu/vk/GrVkResourceProvider.cpp
@@ -450,6 +450,16 @@
}
}
+void GrVkResourceProvider::forceSyncAllCommandBuffers() {
+ for (int i = fActiveCommandPools.count() - 1; fActiveCommandPools.count() && i >= 0; --i) {
+ GrVkCommandPool* pool = fActiveCommandPools[i];
+ if (!pool->isOpen()) {
+ GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
+ buffer->forceSync(fGpu);
+ }
+ }
+}
+
void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
sk_sp<GrRefCntedCallback> finishedCallback) {
for (int i = 0; i < fActiveCommandPools.count(); ++i) {
diff --git a/src/gpu/vk/GrVkResourceProvider.h b/src/gpu/vk/GrVkResourceProvider.h
index 3790cc5..38b7744 100644
--- a/src/gpu/vk/GrVkResourceProvider.h
+++ b/src/gpu/vk/GrVkResourceProvider.h
@@ -103,6 +103,8 @@
void checkCommandBuffers();
+ void forceSyncAllCommandBuffers();
+
// We must add the finishedProc to all active command buffers since we may have flushed work
// that the client cares about before they explicitly called flush and the GPU may reorder
// command execution. So we make sure all previously submitted work finishes before we call the