/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkCommandBuffer.h"

#include "include/core/SkRect.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkMeshBuffer.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineState.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkTransferBuffer.h"
#include "src/gpu/vk/GrVkUtil.h"

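// Resets all cached dynamic state to sentinel values that can never match a real bind, so
// the next call through each state-setting entry point is guaranteed to re-record its
// Vulkan command instead of being elided by the redundancy checks below.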
void GrVkCommandBuffer::invalidateState() {
    for (auto& boundInputBuffer : fBoundInputBuffers) {
        boundInputBuffer = VK_NULL_HANDLE;
    }
    fBoundIndexBuffer = VK_NULL_HANDLE;

    memset(&fCachedViewport, 0, sizeof(VkViewport));
    fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0

    memset(&fCachedScissor, 0, sizeof(VkRect2D));
    fCachedScissor.offset.x = -1; // Scissor offset must be non-negative to be valid

    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0;
    }
}

void GrVkCommandBuffer::freeGPUData(const GrGpu* gpu, VkCommandPool cmdPool) const {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive);
    SkASSERT(!fTrackedResources.count());
    SkASSERT(!fTrackedRecycledResources.count());
    SkASSERT(cmdPool != VK_NULL_HANDLE);
    SkASSERT(!this->isWrapped());

    GrVkGpu* vkGpu = (GrVkGpu*)gpu;
    GR_VK_CALL(vkGpu->vkInterface(), FreeCommandBuffers(vkGpu->device(), cmdPool, 1, &fCmdBuffer));

    this->onFreeGPUData(vkGpu);
}

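// Releases this command buffer's refs on everything it recorded. Each tracked resource is
// notified that the GPU is done with it and then unreffed (or recycled). To bound memory
// growth, the tracking arrays are normally just rewound; once the rewind count exceeds
// kNumRewindResetsBeforeFullReset they are fully reset so their backing storage returns to
// the initial reserve size.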
void GrVkCommandBuffer::releaseResources() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive);
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->notifyFinishedWithWorkOnGpu();
        fTrackedResources[i]->unref();
    }
    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        fTrackedRecycledResources[i]->notifyFinishedWithWorkOnGpu();
        fTrackedRecycledResources[i]->recycle();
    }

    if (++fNumResets > kNumRewindResetsBeforeFullReset) {
        fTrackedResources.reset();
        fTrackedRecycledResources.reset();
        fTrackedResources.setReserve(kInitialTrackedResourcesCount);
        fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
        fNumResets = 0;
    } else {
        fTrackedResources.rewind();
        fTrackedRecycledResources.rewind();
    }

    this->invalidateState();

    this->onReleaseResources();
}

////////////////////////////////////////////////////////////////////////////////
// CommandBuffer commands
////////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        const GrManagedResource* resource,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) {
    SkASSERT(!this->isWrapped());
    SkASSERT(fIsActive);
    // For images we can have barriers inside of render passes, but they require us to add more
    // support in subpasses, which need self-dependencies to have barriers inside them. Also, we
    // can never have buffer barriers inside of a render pass. For now we will just assert that
    // we are not in a render pass.
    SkASSERT(!fActiveRenderPass);

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = reinterpret_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check if we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier that is already in the current batch. If it does, then
        // we must submit the first batch because the Vulkan spec does not define a specific
        // ordering for barriers submitted in the same batch.
        // TODO: Look at whether we can gain anything by merging barriers together instead of
        // submitting the old ones.
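        // For example, if the current batch already holds a barrier on mip levels [0,3] of an
        // image and a new barrier arrives for levels [2,5] of the same image, the ranges
        // intersect at [2,3], so the pending batch is flushed before the new barrier is added.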
        for (int i = 0; i < fImageBarriers.count(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers(gpu);
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;

    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    fHasWork = true;
    if (resource) {
        this->addResource(resource);
    }
}

void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);

    // Currently we never submit a pipeline barrier without at least one memory barrier.
    if (fBufferBarriers.count() || fImageBarriers.count()) {
        // For images we can have barriers inside of render passes, but they require us to add
        // more support in subpasses, which need self-dependencies to have barriers inside them.
        // Also, we can never have buffer barriers inside of a render pass. For now we will just
        // assert that we are not in a render pass.
        SkASSERT(!fActiveRenderPass);
        SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
                fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
                fBufferBarriers.count(), fBufferBarriers.begin(),
                fImageBarriers.count(), fImageBarriers.begin()));
        fBufferBarriers.reset();
        fImageBarriers.reset();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
    SkASSERT(!fBufferBarriers.count());
    SkASSERT(!fImageBarriers.count());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}

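// Binds the vertex buffer for the given binding slot, skipping the Vulkan call entirely
// when the same VkBuffer is already bound there. The cache only works while every mesh
// buffer reports an offset of 0; see the TODO below.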
void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
                                        const GrVkMeshBuffer* vbuffer) {
    VkBuffer vkBuffer = vbuffer->buffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    SkASSERT(binding < kMaxInputBuffers);
    // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundInputBuffers[binding]) {
        VkDeviceSize offset = vbuffer->offset();
        GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
                                                            binding,
                                                            1,
                                                            &vkBuffer,
                                                            &offset));
        fBoundInputBuffers[binding] = vkBuffer;
        this->addResource(vbuffer->resource());
    }
}

void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, const GrVkMeshBuffer* ibuffer) {
    VkBuffer vkBuffer = ibuffer->buffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundIndexBuffer) {
        GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
                                                          vkBuffer,
                                                          ibuffer->offset(),
                                                          VK_INDEX_TYPE_UINT16));
        fBoundIndexBuffer = vkBuffer;
        this->addResource(ibuffer->resource());
    }
}

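// Records vkCmdClearAttachments, which, unlike the image clears later in this file, is only
// legal inside a render pass. The debug block checks that any color attachment index named
// in the clears matches the active render pass's color attachment.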
void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(numAttachments > 0);
    SkASSERT(numRects > 0);

    this->addingWork(gpu);

#ifdef SK_DEBUG
    for (int i = 0; i < numAttachments; ++i) {
        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
            uint32_t testIndex;
            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
            SkASSERT(testIndex == attachments[i].colorAttachment);
        }
    }
#endif
    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
                                                       numAttachments,
                                                       attachments,
                                                       numRects,
                                                       clearRects));
    if (gpu->vkCaps().mustInvalidatePrimaryCmdBufferStateAfterClearAttachments()) {
        this->invalidateState();
    }
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           GrVkPipelineState* pipelineState,
                                           VkPipelineLayout layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout,
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
}

void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                   pipeline->pipeline()));
    this->addResource(pipeline);
}

void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
                                                  indexCount,
                                                  instanceCount,
                                                  firstIndex,
                                                  vertexOffset,
                                                  firstInstance));
}

void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
                                           vertexCount,
                                           instanceCount,
                                           firstVertex,
                                           firstInstance));
}

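// The indirect draw variants read their draw parameters (counts, offsets, and instance
// info) from a GPU buffer at the given offset rather than from values recorded here, so
// the indirect buffer must be a real GPU buffer, never a CPU-backed one.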
void GrVkCommandBuffer::drawIndirect(const GrVkGpu* gpu,
                                     const GrVkMeshBuffer* indirectBuffer,
                                     VkDeviceSize offset,
                                     uint32_t drawCount,
                                     uint32_t stride) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(!indirectBuffer->isCpuBuffer());
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndirect(fCmdBuffer,
                                                   indirectBuffer->buffer(),
                                                   offset,
                                                   drawCount,
                                                   stride));
}

void GrVkCommandBuffer::drawIndexedIndirect(const GrVkGpu* gpu,
                                            const GrVkMeshBuffer* indirectBuffer,
                                            VkDeviceSize offset,
                                            uint32_t drawCount,
                                            uint32_t stride) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(!indirectBuffer->isCpuBuffer());
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexedIndirect(fCmdBuffer,
                                                          indirectBuffer->buffer(),
                                                          offset,
                                                          drawCount,
                                                          stride));
}

void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
                                    uint32_t firstViewport,
                                    uint32_t viewportCount,
                                    const VkViewport* viewports) {
    SkASSERT(fIsActive);
    SkASSERT(1 == viewportCount);
    if (memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
                                                      firstViewport,
                                                      viewportCount,
                                                      viewports));
        fCachedViewport = viewports[0];
    }
}

void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
                                   uint32_t firstScissor,
                                   uint32_t scissorCount,
                                   const VkRect2D* scissors) {
    SkASSERT(fIsActive);
    SkASSERT(1 == scissorCount);
    if (memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
                                                     firstScissor,
                                                     scissorCount,
                                                     scissors));
        fCachedScissor = scissors[0];
    }
}

void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
                                          const float blendConstants[4]) {
    SkASSERT(fIsActive);
    if (memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}

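// Every command that does real work funnels through here first: any pipeline barriers that
// were batched up since the last flush are recorded into the command buffer before the new
// work, which keeps the barriers ordered ahead of the commands they guard.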
void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
    this->submitPipelineBarriers(gpu);
    fHasWork = true;
}

////////////////////////////////////////////////////////////////////////////////
// PrimaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////
GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}

GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(GrVkGpu* gpu,
                                                           VkCommandPool cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool,                                          // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkPrimaryCommandBuffer(cmdBuffer);
}

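// Begins recording. ONE_TIME_SUBMIT tells the driver this recording will be submitted only
// once before being reset, which can let it skip work that would only pay off on replay.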
void GrVkPrimaryCommandBuffer::begin(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
    fIsActive = true;
}

void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->submitPipelineBarriers(gpu);

    GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}

bool GrVkPrimaryCommandBuffer::beginRenderPass(GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               const VkClearValue clearValues[],
                                               GrVkRenderTarget* target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(renderPass->isCompatible(*target));

    const GrVkFramebuffer* framebuffer = target->getFramebuffer();
    if (!framebuffer) {
        return false;
    }

    this->addingWork(gpu);

    VkRenderPassBeginInfo beginInfo;
    VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft, bounds.fTop };
    renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };

    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = renderPass->vkRenderPass();
    beginInfo.framebuffer = framebuffer->framebuffer();
    beginInfo.renderArea = renderArea;
    beginInfo.clearValueCount = renderPass->clearValueCount();
    beginInfo.pClearValues = clearValues;

    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;

    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    this->addResource(renderPass);
    target->addResources(*this);
    return true;
}

void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}

void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    // The Vulkan spec allows secondary command buffers to be executed on a primary command buffer
    // if the command pools they were created from were created with the same queue family.
    // However, we currently always create them from the same pool.
    SkASSERT(fIsActive);
    SkASSERT(!buffer->fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));

    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
    fSecondaryCommandBuffers.push_back(std::move(buffer));
    // When executing a secondary command buffer all state (besides render pass state) becomes
    // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
    this->invalidateState();
}

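// Thin wrapper over vkQueueSubmit for a single submit. When the context is protected, a
// VkProtectedSubmitInfo is chained onto the VkSubmitInfo's pNext so the submission runs as
// protected work; otherwise pNext is left null.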
static bool submit_to_queue(GrVkGpu* gpu,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            GrProtected protectedContext) {
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (protectedContext == GrProtected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, QueueSubmit(queue, 1, &submitInfo, fence));
    return result == VK_SUCCESS;
}

bool GrVkPrimaryCommandBuffer::submitToQueue(
        GrVkGpu* gpu,
        VkQueue queue,
        SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
        SkTArray<GrVkSemaphore::Resource*>& waitSemaphores) {
    SkASSERT(!fIsActive);

    VkResult err;
    if (VK_NULL_HANDLE == fSubmitFence) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        GR_VK_CALL_RESULT(gpu, err, CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                &fSubmitFence));
        if (err) {
            fSubmitFence = VK_NULL_HANDLE;
            return false;
        }
    } else {
        // This cannot return DEVICE_LOST so we assert we succeeded.
        GR_VK_CALL_RESULT(gpu, err, ResetFences(gpu->device(), 1, &fSubmitFence));
        SkASSERT(err == VK_SUCCESS);
    }

    int signalCount = signalSemaphores.count();
    int waitCount = waitSemaphores.count();

    bool submitted = false;

    if (0 == signalCount && 0 == waitCount) {
        // This command buffer has no dependent semaphores so we can simply just submit it to the
        // queue with no worries.
        submitted = submit_to_queue(
                gpu, queue, fSubmitFence, 0, nullptr, nullptr, 1, &fCmdBuffer, 0, nullptr,
                gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
    } else {
        SkTArray<VkSemaphore> vkSignalSems(signalCount);
        for (int i = 0; i < signalCount; ++i) {
            if (signalSemaphores[i]->shouldSignal()) {
                this->addResource(signalSemaphores[i]);
                vkSignalSems.push_back(signalSemaphores[i]->semaphore());
            }
        }

        SkTArray<VkSemaphore> vkWaitSems(waitCount);
        SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
        for (int i = 0; i < waitCount; ++i) {
            if (waitSemaphores[i]->shouldWait()) {
                this->addResource(waitSemaphores[i]);
                vkWaitSems.push_back(waitSemaphores[i]->semaphore());
                vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
            }
        }
        submitted = submit_to_queue(gpu, queue, fSubmitFence, vkWaitSems.count(),
                                    vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
                                    vkSignalSems.count(), vkSignalSems.begin(),
                                    gpu->protectedContext() ? GrProtected::kYes
                                                            : GrProtected::kNo);
        if (submitted) {
            for (int i = 0; i < signalCount; ++i) {
                signalSemaphores[i]->markAsSignaled();
            }
            for (int i = 0; i < waitCount; ++i) {
                waitSemaphores[i]->markAsWaited();
            }
        }
    }

    if (!submitted) {
        // Destroy the fence or else we will try to wait forever for it to finish.
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
        return false;
    }
    return true;
}

void GrVkPrimaryCommandBuffer::forceSync(GrVkGpu* gpu) {
    SkASSERT(fSubmitFence != VK_NULL_HANDLE);
    GR_VK_CALL_ERRCHECK(gpu, WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
}

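// Polls the submit fence without blocking. VK_ERROR_DEVICE_LOST is reported as "finished"
// because the work will never complete, so callers can treat it as done and release
// the resources it holds.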
bool GrVkPrimaryCommandBuffer::finished(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err;
    GR_VK_CALL_RESULT_NOCHECK(gpu, err, GetFenceStatus(gpu->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
        case VK_ERROR_DEVICE_LOST:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SkDebugf("Error getting fence status: %d\n", err);
            SK_ABORT("Got an invalid fence status");
            return false;
    }
}

void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc) {
    fFinishedProcs.push_back(std::move(finishedProc));
}

void GrVkPrimaryCommandBuffer::onReleaseResources() {
    for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
        fSecondaryCommandBuffers[i]->releaseResources();
    }
    fFinishedProcs.reset();
}

void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool) {
    for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
        fSecondaryCommandBuffers[i].release()->recycle(cmdPool);
    }
    fSecondaryCommandBuffers.reset();
}

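// The copy, blit, resolve, and image-clear commands below are transfer-style operations:
// each one asserts it is outside a render pass, flushes pending barriers via addingWork(),
// and refs the source and destination resources so they outlive this command buffer's
// execution on the GPU.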
void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
                                         GrVkImage* srcImage,
                                         VkImageLayout srcLayout,
                                         GrVkImage* dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t copyRegionCount,
                                         const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcImage->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
                                                srcImage->image(),
                                                srcLayout,
                                                dstImage->image(),
                                                dstLayout,
                                                copyRegionCount,
                                                copyRegions));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrManagedResource* srcResource,
                                         VkImage srcImage,
                                         VkImageLayout srcLayout,
                                         const GrManagedResource* dstResource,
                                         VkImage dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcResource);
    this->addResource(dstResource);
    GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
                                                srcImage,
                                                srcLayout,
                                                dstImage,
                                                dstLayout,
                                                blitRegionCount,
                                                blitRegions,
                                                filter));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkImage& srcImage,
                                         const GrVkImage& dstImage,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    this->blitImage(gpu,
                    srcImage.resource(),
                    srcImage.image(),
                    srcImage.currentLayout(),
                    dstImage.resource(),
                    dstImage.image(),
                    dstImage.currentLayout(),
                    blitRegionCount,
                    blitRegions,
                    filter);
}

void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                 GrVkImage* srcImage,
                                                 VkImageLayout srcLayout,
                                                 GrVkTransferBuffer* dstBuffer,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcImage->resource());
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                        srcImage->image(),
                                                        srcLayout,
                                                        dstBuffer->buffer(),
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                 GrVkTransferBuffer* srcBuffer,
                                                 GrVkImage* dstImage,
                                                 VkImageLayout dstLayout,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcBuffer->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
                                                        srcBuffer->buffer(),
                                                        dstImage->image(),
                                                        dstLayout,
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
                                          GrVkBuffer* srcBuffer,
                                          GrVkBuffer* dstBuffer,
                                          uint32_t regionCount,
                                          const VkBufferCopy* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
#ifdef SK_DEBUG
    for (uint32_t i = 0; i < regionCount; ++i) {
        const VkBufferCopy& region = regions[i];
        SkASSERT(region.size > 0);
        SkASSERT(region.srcOffset < srcBuffer->size());
        SkASSERT(region.dstOffset < dstBuffer->size());
        SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
        SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
    }
#endif
    this->addResource(srcBuffer->resource());
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
                                                 srcBuffer->buffer(),
                                                 dstBuffer->buffer(),
                                                 regionCount,
                                                 regions));
}

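// Inline buffer update. The 4-byte alignment and 65536-byte size limits asserted below match
// the constraints the Vulkan spec places on vkCmdUpdateBuffer; larger uploads have to go
// through a staging buffer and copyBuffer() instead.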
void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
                                            GrVkBuffer* dstBuffer,
                                            VkDeviceSize dstOffset,
                                            VkDeviceSize dataSize,
                                            const void* data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(0 == (dstOffset & 0x03));  // four byte aligned
    // TODO: handle larger transfer sizes
    SkASSERT(dataSize <= 65536);
    SkASSERT(0 == (dataSize & 0x03));  // four byte aligned
    this->addingWork(gpu);
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdUpdateBuffer(fCmdBuffer,
                                                   dstBuffer->buffer(),
                                                   dstOffset,
                                                   dataSize,
                                                   (const uint32_t*) data));
}

void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                               GrVkImage* image,
                                               const VkClearColorValue* color,
                                               uint32_t subRangeCount,
                                               const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
                                                      image->image(),
                                                      image->currentLayout(),
                                                      color,
                                                      subRangeCount,
                                                      subRanges));
}

void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                      GrVkImage* image,
                                                      const VkClearDepthStencilValue* color,
                                                      uint32_t subRangeCount,
                                                      const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
                                                             image->image(),
                                                             image->currentLayout(),
                                                             color,
                                                             subRangeCount,
                                                             subRanges));
}

void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
                                            const GrVkImage& srcImage,
                                            const GrVkImage& dstImage,
                                            uint32_t regionCount,
                                            const VkImageResolve* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->addingWork(gpu);
    this->addResource(srcImage.resource());
    this->addResource(dstImage.resource());

    GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
                                                   srcImage.image(),
                                                   srcImage.currentLayout(),
                                                   dstImage.image(),
                                                   dstImage.currentLayout(),
                                                   regionCount,
                                                   regions));
}

void GrVkPrimaryCommandBuffer::onFreeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(!fActiveRenderPass);
    // Destroy the fence, if any
    if (VK_NULL_HANDLE != fSubmitFence) {
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
    }
    SkASSERT(!fSecondaryCommandBuffers.count());
}

///////////////////////////////////////////////////////////////////////////////
// SecondaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(GrVkGpu* gpu,
                                                               GrVkCommandPool* cmdPool) {
    SkASSERT(cmdPool);
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool->vkCommandPool(),                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkSecondaryCommandBuffer(cmdBuffer, false);
}

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(VkCommandBuffer cmdBuffer) {
    return new GrVkSecondaryCommandBuffer(cmdBuffer, true);
}

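// Begins recording a secondary command buffer that will run entirely inside the given
// compatible render pass. The inheritance info tells the driver which render pass (and,
// when known, which framebuffer) the commands will execute in. Wrapped buffers that were
// imported from a client are assumed to have been begun externally, so vkBeginCommandBuffer
// is skipped for them.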
void GrVkSecondaryCommandBuffer::begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
                                       const GrVkRenderPass* compatibleRenderPass) {
    SkASSERT(!fIsActive);
    SkASSERT(compatibleRenderPass);
    fActiveRenderPass = compatibleRenderPass;

    if (!this->isWrapped()) {
        VkCommandBufferInheritanceInfo inheritanceInfo;
        memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
        inheritanceInfo.pNext = nullptr;
        inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
        inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
        inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
        inheritanceInfo.occlusionQueryEnable = false;
        inheritanceInfo.queryFlags = 0;
        inheritanceInfo.pipelineStatistics = 0;

        VkCommandBufferBeginInfo cmdBufferBeginInfo;
        memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        cmdBufferBeginInfo.pNext = nullptr;
        cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
                                   VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;

        GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
    }
    fIsActive = true;
}

void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    if (!this->isWrapped()) {
        GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    }
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}

void GrVkSecondaryCommandBuffer::recycle(GrVkCommandPool* cmdPool) {
    if (this->isWrapped()) {
        delete this;
    } else {
        cmdPool->recycleSecondaryCommandBuffer(this);
    }
}