Update min Vulkan version to 1.0.8.0, and fix various bugs
While updating the SDK, the debug layers also surfaced multiple bugs.
I have fixed those as well in this CL. These include:
1. Incorrectly tracking the allocated descriptor sets from the descriptor pools
2. Illegally issuing MemoryBarriers inside an active render pass.
3. Correctly setting the Stencil Image layout any time we use a render pass with a stencil attachment
4. Setting the correct aspect mask for Depth/Stencil in a barrier.
TBR=bsalomon@google.com
BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1906623002
Review URL: https://codereview.chromium.org/1906623002
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 5fba475..040912e 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -619,18 +619,15 @@
void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
const GrNonInstancedMesh& mesh) {
+ // There is no need to put any memory barriers to make sure host writes have finished here.
+ // When a command buffer is submitted to a queue, there is an implicit memory barrier that
+ // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
+ // an active RenderPass.
GrVkVertexBuffer* vbuf;
vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
SkASSERT(vbuf);
SkASSERT(!vbuf->isMapped());
- vbuf->addMemoryBarrier(this,
- VK_ACCESS_HOST_WRITE_BIT,
- VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
- VK_PIPELINE_STAGE_HOST_BIT,
- VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
- false);
-
fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);
if (mesh.isIndexed()) {
@@ -638,13 +635,6 @@
SkASSERT(ibuf);
SkASSERT(!ibuf->isMapped());
- ibuf->addMemoryBarrier(this,
- VK_ACCESS_HOST_WRITE_BIT,
- VK_ACCESS_INDEX_READ_BIT,
- VK_PIPELINE_STAGE_HOST_BIT,
- VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
- false);
-
fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
}
}
@@ -783,6 +773,7 @@
info->fAlloc = alloc;
info->fImageTiling = imageTiling;
info->fImageLayout = initialLayout;
+ info->fFormat = pixelFormat;
return (GrBackendObject)info;
}
@@ -933,8 +924,7 @@
VkImageLayout origDstLayout = vkStencil->currentLayout();
VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
- VkPipelineStageFlags srcStageMask =
- GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
vkStencil->setImageLayout(this,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
@@ -944,6 +934,21 @@
dstStageMask,
false);
+ // Change layout of our render target so it can be used as the color attachment. This is what
+ // the render pass expects when it begins.
+ VkImageLayout layout = vkRT->currentLayout();
+ srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ vkRT->setImageLayout(this,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
VkClearRect clearRect;
// Flip rect if necessary
SkIRect vkRect = rect;
@@ -990,8 +995,7 @@
if (rect.width() != target->width() || rect.height() != target->height()) {
VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
- VkPipelineStageFlags srcStageMask =
- GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
vkRT->setImageLayout(this,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
@@ -1001,6 +1005,25 @@
dstStageMask,
false);
+ // If we are using a stencil attachment we also need to change its layout to what the render
+ // pass is expecting.
+ if (GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment()) {
+ GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
+ origDstLayout = vkStencil->currentLayout();
+ srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
+ dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+ srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+ dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ vkStencil->setImageLayout(this,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+ }
+
VkClearRect clearRect;
// Flip rect if necessary
SkIRect vkRect = rect;
@@ -1483,7 +1506,6 @@
const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
SkASSERT(renderPass);
- fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
GrPrimitiveType primitiveType = meshes[0].primitiveType();
sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline,
@@ -1496,8 +1518,6 @@
// Change layout of our render target so it can be used as the color attachment
VkImageLayout layout = vkRT->currentLayout();
- // Our color attachment is purely a destination and won't be read so don't need to flush or
- // invalidate any caches
VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
@@ -1511,8 +1531,7 @@
false);
// If we are using a stencil attachment we also need to update its layout
- if (!pipeline.getStencil().isDisabled()) {
- GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
+ if (GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment()) {
GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
VkImageLayout origDstLayout = vkStencil->currentLayout();
VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
@@ -1530,12 +1549,9 @@
false);
}
+ fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
for (int i = 0; i < meshCount; ++i) {
- if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
- this->xferBarrier(pipeline.getRenderTarget(), barrierType);
- }
-
const GrMesh& mesh = meshes[i];
GrMesh::Iterator iter;
const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
@@ -1547,6 +1563,10 @@
pipelineState->freeTempResources(this);
SkDEBUGCODE(pipelineState = nullptr);
primitiveType = nonIdxMesh->primitiveType();
+ // It is illegal for us to have the necessary memory barriers for when we write and
+ // update the uniform buffers in prepareDrawState while in an active render pass.
+ // Thus we must end the current one and then start it up again.
+ fCurrentCmdBuffer->endRenderPass(this);
pipelineState = this->prepareDrawState(pipeline,
primProc,
primitiveType,
@@ -1554,6 +1574,7 @@
if (!pipelineState) {
return;
}
+ fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
}
SkASSERT(pipelineState);
this->bindGeometry(primProc, *nonIdxMesh);