/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/

#include "src/gpu/vk/GrVkCommandBuffer.h"

#include "include/core/SkRect.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkImage.h"
#include "src/gpu/vk/GrVkImageView.h"
#include "src/gpu/vk/GrVkIndexBuffer.h"
#include "src/gpu/vk/GrVkPipeline.h"
#include "src/gpu/vk/GrVkPipelineState.h"
#include "src/gpu/vk/GrVkRenderPass.h"
#include "src/gpu/vk/GrVkRenderTarget.h"
#include "src/gpu/vk/GrVkTransferBuffer.h"
#include "src/gpu/vk/GrVkUtil.h"
#include "src/gpu/vk/GrVkVertexBuffer.h"

void GrVkCommandBuffer::invalidateState() {
    for (auto& boundInputBuffer : fBoundInputBuffers) {
        boundInputBuffer = VK_NULL_HANDLE;
    }
    fBoundIndexBuffer = VK_NULL_HANDLE;

    memset(&fCachedViewport, 0, sizeof(VkViewport));
    fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0

    memset(&fCachedScissor, 0, sizeof(VkRect2D));
    fCachedScissor.offset.x = -1; // Scissor offset must be non-negative to be valid

    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0;
    }
}

void GrVkCommandBuffer::freeGPUData(GrVkGpu* gpu, VkCommandPool cmdPool) const {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(!fIsActive);
    SkASSERT(!fTrackedResources.count());
    SkASSERT(!fTrackedRecycledResources.count());
    SkASSERT(cmdPool != VK_NULL_HANDLE);
    SkASSERT(!this->isWrapped());

    GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), cmdPool, 1, &fCmdBuffer));

    this->onFreeGPUData(gpu);
}

void GrVkCommandBuffer::releaseResources(GrVkGpu* gpu) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkDEBUGCODE(fResourcesReleased = true;)
    SkASSERT(!fIsActive);
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedResources[i]->unref(gpu);
    }
    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
    }

    if (++fNumResets > kNumRewindResetsBeforeFullReset) {
        fTrackedResources.reset();
        fTrackedRecycledResources.reset();
        fTrackedResources.setReserve(kInitialTrackedResourcesCount);
        fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
        fNumResets = 0;
    } else {
        fTrackedResources.rewind();
        fTrackedRecycledResources.rewind();
    }

    this->invalidateState();

    this->onReleaseResources(gpu);
}

////////////////////////////////////////////////////////////////////////////////
// CommandBuffer commands
////////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        const GrVkResource* resource,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) {
    SkASSERT(!this->isWrapped());
    SkASSERT(fIsActive);
    // Images can have barriers inside of render passes, but that requires subpass
    // self-dependencies, which we do not currently support. Buffer barriers are never allowed
    // inside a render pass. For now we just assert that we are not in a render pass.
    SkASSERT(!fActiveRenderPass);

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = reinterpret_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check if we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier that is already in the current batch. If it does, we
        // must submit the first batch, because the Vulkan spec does not define a specific
        // ordering for barriers submitted in the same batch.
        // TODO: Look at whether we can gain anything by merging barriers together instead of
        // submitting the old ones.
        for (int i = 0; i < fImageBarriers.count(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
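                // Two closed mip ranges [newStart, newEnd] and [oldStart, oldEnd] intersect
                // exactly when the larger of the starts is at or below the smaller of the ends.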
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers(gpu);
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;

    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    fHasWork = true;
    if (resource) {
        this->addResource(resource);
    }
}
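
// Illustrative sketch (not from this file): how a caller might route an image layout
// transition through pipelineBarrier(). The access masks, stages, and layouts below are
// example values only.
//
//     VkImageMemoryBarrier barrier;
//     memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = image->image();
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     cmdBuffer->pipelineBarrier(gpu, image->resource(),
//                                VK_PIPELINE_STAGE_TRANSFER_BIT,
//                                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//                                /*byRegion=*/false,
//                                GrVkCommandBuffer::kImageMemory_BarrierType, &barrier);
//
// Note that the barrier is only batched here; it is recorded into the command buffer the
// next time submitPipelineBarriers() runs.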

void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);

    // Currently we never submit a pipeline barrier without at least one memory barrier.
    if (fBufferBarriers.count() || fImageBarriers.count()) {
        // Images can have barriers inside of render passes, but that requires subpass
        // self-dependencies, which we do not currently support. Buffer barriers are never
        // allowed inside a render pass. For now we just assert that we are not in a render
        // pass.
        SkASSERT(!fActiveRenderPass);
        SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(
                fCmdBuffer, fSrcStageMask, fDstStageMask, dependencyFlags, 0, nullptr,
                fBufferBarriers.count(), fBufferBarriers.begin(),
                fImageBarriers.count(), fImageBarriers.begin()));
        fBufferBarriers.reset();
        fImageBarriers.reset();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
    SkASSERT(!fBufferBarriers.count());
    SkASSERT(!fImageBarriers.count());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}


void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
                                        const GrVkVertexBuffer* vbuffer) {
    VkBuffer vkBuffer = vbuffer->buffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    SkASSERT(binding < kMaxInputBuffers);
    // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundInputBuffers[binding]) {
        VkDeviceSize offset = vbuffer->offset();
        GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
                                                            binding,
                                                            1,
                                                            &vkBuffer,
                                                            &offset));
        fBoundInputBuffers[binding] = vkBuffer;
        this->addResource(vbuffer->resource());
    }
}

void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, const GrVkIndexBuffer* ibuffer) {
    VkBuffer vkBuffer = ibuffer->buffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundIndexBuffer) {
        GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
                                                          vkBuffer,
                                                          ibuffer->offset(),
                                                          VK_INDEX_TYPE_UINT16));
        fBoundIndexBuffer = vkBuffer;
        this->addResource(ibuffer->resource());
    }
}

void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(numAttachments > 0);
    SkASSERT(numRects > 0);

    this->addingWork(gpu);

#ifdef SK_DEBUG
    for (int i = 0; i < numAttachments; ++i) {
        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
            uint32_t testIndex;
            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
            SkASSERT(testIndex == attachments[i].colorAttachment);
        }
    }
#endif
    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
                                                       numAttachments,
                                                       attachments,
                                                       numRects,
                                                       clearRects));
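    // On devices where the cap below is set, the driver cannot be trusted to preserve bound
    // state across vkCmdClearAttachments, so we drop our cached state and rebind everything
    // on subsequent commands.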
    if (gpu->vkCaps().mustInvalidatePrimaryCmdBufferStateAfterClearAttachments()) {
        this->invalidateState();
    }
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           GrVkPipelineState* pipelineState,
                                           VkPipelineLayout layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout,
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
}

void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                   pipeline->pipeline()));
    this->addResource(pipeline);
}

void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
                                                  indexCount,
                                                  instanceCount,
                                                  firstIndex,
                                                  vertexOffset,
                                                  firstInstance));
}

void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
                                           vertexCount,
                                           instanceCount,
                                           firstVertex,
                                           firstInstance));
}

void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
                                    uint32_t firstViewport,
                                    uint32_t viewportCount,
                                    const VkViewport* viewports) {
    SkASSERT(fIsActive);
    SkASSERT(1 == viewportCount);
    if (memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
                                                      firstViewport,
                                                      viewportCount,
                                                      viewports));
        fCachedViewport = viewports[0];
    }
}

void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
                                   uint32_t firstScissor,
                                   uint32_t scissorCount,
                                   const VkRect2D* scissors) {
    SkASSERT(fIsActive);
    SkASSERT(1 == scissorCount);
    if (memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
                                                     firstScissor,
                                                     scissorCount,
                                                     scissors));
        fCachedScissor = scissors[0];
    }
}

void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
                                          const float blendConstants[4]) {
    SkASSERT(fIsActive);
    if (memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}
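
// Commands that record actual work (draws, clears, copies, blits, and so on) call
// addingWork() first, so any barriers batched by pipelineBarrier() are flushed into the
// command buffer ahead of the work they guard; pure state binds do not flush barriers.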
void GrVkCommandBuffer::addingWork(const GrVkGpu* gpu) {
    this->submitPipelineBarriers(gpu);
    fHasWork = true;
}

///////////////////////////////////////////////////////////////////////////////
// PrimaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////
GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}

GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(GrVkGpu* gpu,
                                                           VkCommandPool cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool,                                          // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkPrimaryCommandBuffer(cmdBuffer);
}

void GrVkPrimaryCommandBuffer::begin(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
    fIsActive = true;
}

void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->submitPipelineBarriers(gpu);

    GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}

bool GrVkPrimaryCommandBuffer::beginRenderPass(GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               const VkClearValue clearValues[],
                                               GrVkRenderTarget* target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(renderPass->isCompatible(*target));

    const GrVkFramebuffer* framebuffer = target->getFramebuffer();
    if (!framebuffer) {
        return false;
    }

    this->addingWork(gpu);

    VkRenderPassBeginInfo beginInfo;
    VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft, bounds.fTop };
    renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };

    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = renderPass->vkRenderPass();
    beginInfo.framebuffer = framebuffer->framebuffer();
    beginInfo.renderArea = renderArea;
    beginInfo.clearValueCount = renderPass->clearValueCount();
    beginInfo.pClearValues = clearValues;

    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;

    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    this->addResource(renderPass);
    target->addResources(*this);
    return true;
}
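
// Illustrative sketch (not from this file): the expected pairing when a caller records a
// render pass on a primary command buffer. Names other than the methods above are examples.
//
//     if (primaryCmdBuffer->beginRenderPass(gpu, renderPass, clearValues, target, bounds,
//                                           /*forSecondaryCB=*/false)) {
//         // ... bindPipeline()/bindDescriptorSets()/draw*() ...
//         primaryCmdBuffer->endRenderPass(gpu);
//     }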

void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    this->addingWork(gpu);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}

void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
    // The Vulkan spec allows secondary command buffers to be executed on a primary command buffer
    // if the command pools they were created from use the same queue family. However, we
    // currently always create them from the same pool.
    SkASSERT(fIsActive);
    SkASSERT(!buffer->fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));

    this->addingWork(gpu);

    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
    fSecondaryCommandBuffers.push_back(std::move(buffer));
    // When executing a secondary command buffer all state (besides render pass state) becomes
    // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
    this->invalidateState();
}

static bool submit_to_queue(GrVkGpu* gpu,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            GrProtected protectedContext) {
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (protectedContext == GrProtected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }

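    // A protected submission is requested by chaining VkProtectedSubmitInfo onto
    // VkSubmitInfo::pNext below; the struct lives on the stack above so that it outlives the
    // QueueSubmit call.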
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = protectedContext == GrProtected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    VkResult result;
    GR_VK_CALL_RESULT(gpu, result, QueueSubmit(queue, 1, &submitInfo, fence));
    return result == VK_SUCCESS;
}

bool GrVkPrimaryCommandBuffer::submitToQueue(
        GrVkGpu* gpu,
        VkQueue queue,
        SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
        SkTArray<GrVkSemaphore::Resource*>& waitSemaphores) {
    SkASSERT(!fIsActive);

    VkResult err;
    if (VK_NULL_HANDLE == fSubmitFence) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        GR_VK_CALL_RESULT(gpu, err, CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                &fSubmitFence));
        if (err) {
            fSubmitFence = VK_NULL_HANDLE;
            return false;
        }
    } else {
        // This cannot return DEVICE_LOST, so we assert we succeeded.
        GR_VK_CALL_RESULT(gpu, err, ResetFences(gpu->device(), 1, &fSubmitFence));
        SkASSERT(err == VK_SUCCESS);
    }

    int signalCount = signalSemaphores.count();
    int waitCount = waitSemaphores.count();

    bool submitted = false;

    if (0 == signalCount && 0 == waitCount) {
        // This command buffer has no dependent semaphores, so we can simply submit it to the
        // queue.
        submitted = submit_to_queue(
                gpu, queue, fSubmitFence, 0, nullptr, nullptr, 1, &fCmdBuffer, 0, nullptr,
                gpu->protectedContext() ? GrProtected::kYes : GrProtected::kNo);
    } else {
        SkTArray<VkSemaphore> vkSignalSems(signalCount);
        for (int i = 0; i < signalCount; ++i) {
            if (signalSemaphores[i]->shouldSignal()) {
                this->addResource(signalSemaphores[i]);
                vkSignalSems.push_back(signalSemaphores[i]->semaphore());
            }
        }

        SkTArray<VkSemaphore> vkWaitSems(waitCount);
        SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
        for (int i = 0; i < waitCount; ++i) {
            if (waitSemaphores[i]->shouldWait()) {
                this->addResource(waitSemaphores[i]);
                vkWaitSems.push_back(waitSemaphores[i]->semaphore());
                vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
            }
        }
        submitted = submit_to_queue(gpu, queue, fSubmitFence, vkWaitSems.count(),
                                    vkWaitSems.begin(), vkWaitStages.begin(), 1, &fCmdBuffer,
                                    vkSignalSems.count(), vkSignalSems.begin(),
                                    gpu->protectedContext() ? GrProtected::kYes
                                                            : GrProtected::kNo);
        if (submitted) {
            for (int i = 0; i < signalCount; ++i) {
                signalSemaphores[i]->markAsSignaled();
            }
            for (int i = 0; i < waitCount; ++i) {
                waitSemaphores[i]->markAsWaited();
            }
        }
    }

    if (!submitted) {
        // Destroy the fence or else we will try to wait forever for it to finish.
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
        return false;
    }
    return true;
}

void GrVkPrimaryCommandBuffer::forceSync(GrVkGpu* gpu) {
    SkASSERT(fSubmitFence != VK_NULL_HANDLE);
    GR_VK_CALL_ERRCHECK(gpu, WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
}

bool GrVkPrimaryCommandBuffer::finished(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err;
    GR_VK_CALL_RESULT_NOCHECK(gpu, err, GetFenceStatus(gpu->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
        case VK_ERROR_DEVICE_LOST:
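            // A lost device will never signal the fence, so report the buffer as finished and
            // let cleanup proceed rather than waiting forever.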
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SkDebugf("Error getting fence status: %d\n", err);
            SK_ABORT("Got an invalid fence status");
            return false;
    }
}

void GrVkPrimaryCommandBuffer::addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc) {
    fFinishedProcs.push_back(std::move(finishedProc));
}

void GrVkPrimaryCommandBuffer::onReleaseResources(GrVkGpu* gpu) {
    for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
        fSecondaryCommandBuffers[i]->releaseResources(gpu);
    }
    fFinishedProcs.reset();
}

void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool) {
    for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
        fSecondaryCommandBuffers[i].release()->recycle(cmdPool);
    }
    fSecondaryCommandBuffers.reset();
}

void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
                                         GrVkImage* srcImage,
                                         VkImageLayout srcLayout,
                                         GrVkImage* dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t copyRegionCount,
                                         const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcImage->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
                                                srcImage->image(),
                                                srcLayout,
                                                dstImage->image(),
                                                dstLayout,
                                                copyRegionCount,
                                                copyRegions));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkResource* srcResource,
                                         VkImage srcImage,
                                         VkImageLayout srcLayout,
                                         const GrVkResource* dstResource,
                                         VkImage dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcResource);
    this->addResource(dstResource);
    GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
                                                srcImage,
                                                srcLayout,
                                                dstImage,
                                                dstLayout,
                                                blitRegionCount,
                                                blitRegions,
                                                filter));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkImage& srcImage,
                                         const GrVkImage& dstImage,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    this->blitImage(gpu,
                    srcImage.resource(),
                    srcImage.image(),
                    srcImage.currentLayout(),
                    dstImage.resource(),
                    dstImage.image(),
                    dstImage.currentLayout(),
                    blitRegionCount,
                    blitRegions,
                    filter);
}

void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                 GrVkImage* srcImage,
                                                 VkImageLayout srcLayout,
                                                 GrVkTransferBuffer* dstBuffer,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcImage->resource());
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                        srcImage->image(),
                                                        srcLayout,
                                                        dstBuffer->buffer(),
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                 GrVkTransferBuffer* srcBuffer,
                                                 GrVkImage* dstImage,
                                                 VkImageLayout dstLayout,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(srcBuffer->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
                                                        srcBuffer->buffer(),
                                                        dstImage->image(),
                                                        dstLayout,
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
                                          GrVkBuffer* srcBuffer,
                                          GrVkBuffer* dstBuffer,
                                          uint32_t regionCount,
                                          const VkBufferCopy* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
#ifdef SK_DEBUG
    for (uint32_t i = 0; i < regionCount; ++i) {
        const VkBufferCopy& region = regions[i];
        SkASSERT(region.size > 0);
        SkASSERT(region.srcOffset < srcBuffer->size());
        SkASSERT(region.dstOffset < dstBuffer->size());
        SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
        SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
    }
#endif
    this->addResource(srcBuffer->resource());
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
                                                 srcBuffer->buffer(),
                                                 dstBuffer->buffer(),
                                                 regionCount,
                                                 regions));
}

void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
                                            GrVkBuffer* dstBuffer,
                                            VkDeviceSize dstOffset,
                                            VkDeviceSize dataSize,
                                            const void* data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(0 == (dstOffset & 0x03));  // four byte aligned
    // TODO: handle larger transfer sizes
    SkASSERT(dataSize <= 65536);
    SkASSERT(0 == (dataSize & 0x03));   // four byte aligned
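    // These limits mirror the Vulkan spec for vkCmdUpdateBuffer: dstOffset and dataSize must
    // be multiples of 4, and dataSize may not exceed 65536 bytes.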
    this->addingWork(gpu);
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdUpdateBuffer(fCmdBuffer,
                                                   dstBuffer->buffer(),
                                                   dstOffset,
                                                   dataSize,
                                                   (const uint32_t*) data));
}

void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                               GrVkImage* image,
                                               const VkClearColorValue* color,
                                               uint32_t subRangeCount,
                                               const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
                                                      image->image(),
                                                      image->currentLayout(),
                                                      color,
                                                      subRangeCount,
                                                      subRanges));
}

void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                      GrVkImage* image,
                                                      const VkClearDepthStencilValue* color,
                                                      uint32_t subRangeCount,
                                                      const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addingWork(gpu);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
                                                             image->image(),
                                                             image->currentLayout(),
                                                             color,
                                                             subRangeCount,
                                                             subRanges));
}

void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
                                            const GrVkImage& srcImage,
                                            const GrVkImage& dstImage,
                                            uint32_t regionCount,
                                            const VkImageResolve* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->addingWork(gpu);
    this->addResource(srcImage.resource());
    this->addResource(dstImage.resource());

    GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
                                                   srcImage.image(),
                                                   srcImage.currentLayout(),
                                                   dstImage.image(),
                                                   dstImage.currentLayout(),
                                                   regionCount,
                                                   regions));
}

void GrVkPrimaryCommandBuffer::onFreeGPUData(GrVkGpu* gpu) const {
    SkASSERT(!fActiveRenderPass);
    // Destroy the fence, if any
    if (VK_NULL_HANDLE != fSubmitFence) {
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
    }
    SkASSERT(!fSecondaryCommandBuffers.count());
}

///////////////////////////////////////////////////////////////////////////////
// SecondaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(GrVkGpu* gpu,
                                                               GrVkCommandPool* cmdPool) {
    SkASSERT(cmdPool);
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool->vkCommandPool(),                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, AllocateCommandBuffers(gpu->device(), &cmdInfo, &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkSecondaryCommandBuffer(cmdBuffer, false);
}

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(VkCommandBuffer cmdBuffer) {
    return new GrVkSecondaryCommandBuffer(cmdBuffer, true);
}

jvanverth7ec92412016-07-06 09:24:57 -0700868 const GrVkRenderPass* compatibleRenderPass) {
egdaniel9a6cf802016-06-08 08:22:05 -0700869 SkASSERT(!fIsActive);
jvanverth7ec92412016-07-06 09:24:57 -0700870 SkASSERT(compatibleRenderPass);
871 fActiveRenderPass = compatibleRenderPass;
egdaniel9a6cf802016-06-08 08:22:05 -0700872
Greg Daniel070cbaf2019-01-03 17:35:54 -0500873 if (!this->isWrapped()) {
874 VkCommandBufferInheritanceInfo inheritanceInfo;
875 memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
876 inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
877 inheritanceInfo.pNext = nullptr;
878 inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
879 inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
880 inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
881 inheritanceInfo.occlusionQueryEnable = false;
882 inheritanceInfo.queryFlags = 0;
883 inheritanceInfo.pipelineStatistics = 0;
egdaniel9a6cf802016-06-08 08:22:05 -0700884
Greg Daniel070cbaf2019-01-03 17:35:54 -0500885 VkCommandBufferBeginInfo cmdBufferBeginInfo;
886 memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
887 cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
888 cmdBufferBeginInfo.pNext = nullptr;
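        // RENDER_PASS_CONTINUE_BIT declares that this secondary buffer executes entirely
        // within a render pass; that render pass is the one named in inheritanceInfo above.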
        cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
                                   VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;

        GR_VK_CALL_ERRCHECK(gpu, BeginCommandBuffer(fCmdBuffer, &cmdBufferBeginInfo));
    }
    fIsActive = true;
}

void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    if (!this->isWrapped()) {
        GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
    }
    this->invalidateState();
    fIsActive = false;
    fHasWork = false;
}

void GrVkSecondaryCommandBuffer::recycle(GrVkCommandPool* cmdPool) {
    if (this->isWrapped()) {
        delete this;
    } else {
        cmdPool->recycleSecondaryCommandBuffer(this);
    }
}