/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkCommandBuffer.h"

#include "GrVkCommandPool.h"
#include "GrVkFramebuffer.h"
#include "GrVkGpu.h"
#include "GrVkImage.h"
#include "GrVkImageView.h"
#include "GrVkIndexBuffer.h"
#include "GrVkPipeline.h"
#include "GrVkPipelineLayout.h"
#include "GrVkPipelineState.h"
#include "GrVkRenderPass.h"
#include "GrVkRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkUtil.h"
#include "GrVkVertexBuffer.h"
#include "SkRect.h"
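
// Drops all cached bind/dynamic state so that the next bindInputBuffer, bindIndexBuffer,
// setViewport, setScissor, or setBlendConstants call re-records its Vulkan command rather
// than being elided by the redundancy checks in those methods.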
void GrVkCommandBuffer::invalidateState() {
    for (auto& boundInputBuffer : fBoundInputBuffers) {
        boundInputBuffer = VK_NULL_HANDLE;
    }
    fBoundIndexBuffer = VK_NULL_HANDLE;

    memset(&fCachedViewport, 0, sizeof(VkViewport));
    fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0

    memset(&fCachedScissor, 0, sizeof(VkRect2D));
    fCachedScissor.offset.x = -1; // Scissor offset must be non-negative to be valid

    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0;
    }
}
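
// Three flavors of tracked resource: fTrackedResources are unref'd once the command
// buffer is done with them; fTrackedRecycledResources are returned to their pool
// instead; fTrackedRecordingResources only need to live until recording ends (see
// GrVkPrimaryCommandBuffer::end).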
void GrVkCommandBuffer::freeGPUData(GrVkGpu* gpu) const {
    SkASSERT(!fIsActive);
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedResources[i]->unref(gpu);
    }

    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
    }

    for (int i = 0; i < fTrackedRecordingResources.count(); ++i) {
        fTrackedRecordingResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecordingResources[i]->unref(gpu);
    }

    if (!this->isWrapped()) {
        GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), fCmdPool->vkCommandPool(),
                                                          1, &fCmdBuffer));
    }

    this->onFreeGPUData(gpu);
}

void GrVkCommandBuffer::abandonGPUData() const {
    SkDEBUGCODE(fResourcesReleased = true;)
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedResources[i]->unrefAndAbandon();
    }

    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
        // We don't recycle resources when abandoning them.
        fTrackedRecycledResources[i]->unrefAndAbandon();
    }

    for (int i = 0; i < fTrackedRecordingResources.count(); ++i) {
        fTrackedRecordingResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecordingResources[i]->unrefAndAbandon();
    }

    this->onAbandonGPUData();
}

void GrVkCommandBuffer::releaseResources(GrVkGpu* gpu) {
    SkDEBUGCODE(fResourcesReleased = true;)
    SkASSERT(!fIsActive);
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedResources[i]->unref(gpu);
    }
    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        fTrackedRecycledResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
    }

    for (int i = 0; i < fTrackedRecordingResources.count(); ++i) {
        fTrackedRecordingResources[i]->notifyRemovedFromCommandBuffer();
        fTrackedRecordingResources[i]->unref(gpu);
    }

    if (++fNumResets > kNumRewindResetsBeforeFullReset) {
        fTrackedResources.reset();
        fTrackedRecycledResources.reset();
        fTrackedRecordingResources.reset();
        fTrackedResources.setReserve(kInitialTrackedResourcesCount);
        fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
        fTrackedRecordingResources.setReserve(kInitialTrackedResourcesCount);
        fNumResets = 0;
    } else {
        fTrackedResources.rewind();
        fTrackedRecycledResources.rewind();
        fTrackedRecordingResources.rewind();
    }

    this->invalidateState();

    this->onReleaseResources(gpu);
}

////////////////////////////////////////////////////////////////////////////////
// CommandBuffer commands
////////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) const {
    SkASSERT(!this->isWrapped());
    SkASSERT(fIsActive);
    // For images we can have barriers inside of render passes but they require us to add more
    // support in subpasses which need self dependencies to have barriers inside them. Also, we can
    // never have buffer barriers inside of a render pass. For now we will just assert that we are
    // not in a render pass.
    SkASSERT(!fActiveRenderPass);
    VkDependencyFlags dependencyFlags = byRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;

    switch (barrierType) {
        case kMemory_BarrierType: {
            const VkMemoryBarrier* barrierPtr = reinterpret_cast<VkMemoryBarrier*>(barrier);
            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
                                                              dstStageMask, dependencyFlags,
                                                              1, barrierPtr,
                                                              0, nullptr,
                                                              0, nullptr));
            break;
        }

        case kBufferMemory_BarrierType: {
            const VkBufferMemoryBarrier* barrierPtr =
                    reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
                                                              dstStageMask, dependencyFlags,
                                                              0, nullptr,
                                                              1, barrierPtr,
                                                              0, nullptr));
            break;
        }

        case kImageMemory_BarrierType: {
            const VkImageMemoryBarrier* barrierPtr =
                    reinterpret_cast<VkImageMemoryBarrier*>(barrier);
            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
                                                              dstStageMask, dependencyFlags,
                                                              0, nullptr,
                                                              0, nullptr,
                                                              1, barrierPtr));
            break;
        }
    }
}
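
// Illustrative sketch (not code from this file): the void* barrier argument must point at
// a struct matching barrierType. A typical image-layout transition, with hypothetical
// stage/access masks and layouts, would look like:
//
//     VkImageMemoryBarrier barrier;
//     memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = image;
//     barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
//     cmdBuffer->pipelineBarrier(gpu, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, false,
//                                GrVkCommandBuffer::kImageMemory_BarrierType, &barrier);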

void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
                                        const GrVkVertexBuffer* vbuffer) {
    VkBuffer vkBuffer = vbuffer->buffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    SkASSERT(binding < kMaxInputBuffers);
    // TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundInputBuffers[binding]) {
        VkDeviceSize offset = vbuffer->offset();
        GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
                                                            binding,
                                                            1,
                                                            &vkBuffer,
                                                            &offset));
        fBoundInputBuffers[binding] = vkBuffer;
        addResource(vbuffer->resource());
    }
}

void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, const GrVkIndexBuffer* ibuffer) {
    VkBuffer vkBuffer = ibuffer->buffer();
    SkASSERT(VK_NULL_HANDLE != vkBuffer);
    // TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
    // to know if we can skip binding or not.
    if (vkBuffer != fBoundIndexBuffer) {
        GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
                                                          vkBuffer,
                                                          ibuffer->offset(),
                                                          VK_INDEX_TYPE_UINT16));
        fBoundIndexBuffer = vkBuffer;
        addResource(ibuffer->resource());
    }
}

void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) const {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(numAttachments > 0);
    SkASSERT(numRects > 0);
#ifdef SK_DEBUG
    for (int i = 0; i < numAttachments; ++i) {
        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
            uint32_t testIndex;
            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
            SkASSERT(testIndex == attachments[i].colorAttachment);
        }
    }
#endif
    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
                                                       numAttachments,
                                                       attachments,
                                                       numRects,
                                                       clearRects));
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           GrVkPipelineState* pipelineState,
                                           GrVkPipelineLayout* layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout->layout(),
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
    this->addRecordingResource(layout);
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           const SkTArray<const GrVkRecycledResource*>& recycled,
                                           const SkTArray<const GrVkResource*>& resources,
                                           GrVkPipelineLayout* layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout->layout(),
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
    this->addRecordingResource(layout);
    for (int i = 0; i < recycled.count(); ++i) {
        this->addRecycledResource(recycled[i]);
    }
    for (int i = 0; i < resources.count(); ++i) {
        this->addResource(resources[i]);
    }
}

void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                   pipeline->pipeline()));
    this->addResource(pipeline);
}

void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) const {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
                                                  indexCount,
                                                  instanceCount,
                                                  firstIndex,
                                                  vertexOffset,
                                                  firstInstance));
}

void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) const {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
                                           vertexCount,
                                           instanceCount,
                                           firstVertex,
                                           firstInstance));
}
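
// The setters below mirror Vulkan dynamic state. Each compares against the value cached
// by invalidateState() (or a previous call) and elides the command when nothing changed.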
void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
                                    uint32_t firstViewport,
                                    uint32_t viewportCount,
                                    const VkViewport* viewports) {
    SkASSERT(fIsActive);
    SkASSERT(1 == viewportCount);
    if (memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
                                                      firstViewport,
                                                      viewportCount,
                                                      viewports));
        fCachedViewport = viewports[0];
    }
}

void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
                                   uint32_t firstScissor,
                                   uint32_t scissorCount,
                                   const VkRect2D* scissors) {
    SkASSERT(fIsActive);
    SkASSERT(1 == scissorCount);
    if (memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
                                                     firstScissor,
                                                     scissorCount,
                                                     scissors));
        fCachedScissor = scissors[0];
    }
}

void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
                                          const float blendConstants[4]) {
    SkASSERT(fIsActive);
    if (memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}

///////////////////////////////////////////////////////////////////////////////
// PrimaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////
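
// Typical lifecycle, as an illustrative sketch only (the caller names and the
// kSkip_SyncQueue enum value are assumptions, not code from this file):
//
//     GrVkPrimaryCommandBuffer* cb = GrVkPrimaryCommandBuffer::Create(gpu, cmdPool);
//     cb->begin(gpu);
//     // ... record copies, barriers, and render passes ...
//     cb->end(gpu);
//     cb->submitToQueue(gpu, queue, GrVkGpu::kSkip_SyncQueue, signalSems, waitSems);
//     // later, once cb->finished(gpu) returns true:
//     cb->releaseResources(gpu);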

GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}

GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(const GrVkGpu* gpu,
                                                           GrVkCommandPool* cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool->vkCommandPool(),                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
                                                                         &cmdInfo,
                                                                         &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkPrimaryCommandBuffer(cmdBuffer, cmdPool);
}

void GrVkPrimaryCommandBuffer::begin(const GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
                                                               &cmdBufferBeginInfo));
    fIsActive = true;
}

void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
    for (int i = 0; i < fTrackedRecordingResources.count(); ++i) {
        fTrackedRecordingResources[i]->unref(gpu);
    }
    fTrackedRecordingResources.rewind();
    this->invalidateState();
    fIsActive = false;
}

void GrVkPrimaryCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               const VkClearValue clearValues[],
                                               const GrVkRenderTarget& target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(renderPass->isCompatible(target));

    VkRenderPassBeginInfo beginInfo;
    VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft, bounds.fTop };
    renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };

    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = renderPass->vkRenderPass();
    beginInfo.framebuffer = target.framebuffer()->framebuffer();
    beginInfo.renderArea = renderArea;
    beginInfo.clearValueCount = renderPass->clearValueCount();
    beginInfo.pClearValues = clearValues;

    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;

    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    this->addResource(renderPass);
    target.addResources(*this);
}

void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}

void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               GrVkSecondaryCommandBuffer* buffer) {
    // The Vulkan spec allows a secondary command buffer to be executed on a primary command
    // buffer as long as the command pools they were each created from share the same queue
    // family. However, we currently always create them from the same pool.
    SkASSERT(buffer->commandPool() == fCmdPool);
    SkASSERT(fIsActive);
    SkASSERT(!buffer->fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));

    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
    buffer->ref();
    fSecondaryCommandBuffers.push_back(buffer);
    // When executing a secondary command buffer all state (besides render pass state) becomes
    // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
    this->invalidateState();
}

static void submit_to_queue(const GrVkInterface* interface,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores) {
    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    GR_VK_CALL_ERRCHECK(interface, QueueSubmit(queue, 1, &submitInfo, fence));
}
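
// Submits this command buffer to the queue, creating the submit fence on first use (or
// resetting it on reuse). Only semaphores that still need to signal or be waited on are
// attached to the submission; see the mutex note in the body.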
void GrVkPrimaryCommandBuffer::submitToQueue(
        const GrVkGpu* gpu,
        VkQueue queue,
        GrVkGpu::SyncQueue sync,
        SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
        SkTArray<GrVkSemaphore::Resource*>& waitSemaphores) {
    SkASSERT(!fIsActive);

    VkResult err;
    if (VK_NULL_HANDLE == fSubmitFence) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        err = GR_VK_CALL(gpu->vkInterface(), CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                         &fSubmitFence));
        SkASSERT(!err);
    } else {
        GR_VK_CALL(gpu->vkInterface(), ResetFences(gpu->device(), 1, &fSubmitFence));
    }

    int signalCount = signalSemaphores.count();
    int waitCount = waitSemaphores.count();

    if (0 == signalCount && 0 == waitCount) {
        // This command buffer has no dependent semaphores so we can simply just submit it to the
        // queue with no worries.
        submit_to_queue(gpu->vkInterface(), queue, fSubmitFence, 0, nullptr, nullptr,
                        1, &fCmdBuffer, 0, nullptr);
    } else {
        GrVkSemaphore::Resource::AcquireMutex();

        SkTArray<VkSemaphore> vkSignalSems(signalCount);
        for (int i = 0; i < signalCount; ++i) {
            if (signalSemaphores[i]->shouldSignal()) {
                this->addResource(signalSemaphores[i]);
                vkSignalSems.push_back(signalSemaphores[i]->semaphore());
            }
        }

        SkTArray<VkSemaphore> vkWaitSems(waitCount);
        SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
        for (int i = 0; i < waitCount; ++i) {
            if (waitSemaphores[i]->shouldWait()) {
                this->addResource(waitSemaphores[i]);
                vkWaitSems.push_back(waitSemaphores[i]->semaphore());
                vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
            }
        }
        submit_to_queue(gpu->vkInterface(), queue, fSubmitFence,
                        vkWaitSems.count(), vkWaitSems.begin(), vkWaitStages.begin(),
                        1, &fCmdBuffer,
                        vkSignalSems.count(), vkSignalSems.begin());
        // Since shouldSignal/Wait do not require the mutex to be held, we must mark the
        // semaphores only after we've submitted. In the worst case another submit grabs the
        // mutex first and then realizes it doesn't need to submit the semaphore. We will never
        // end up in a state where a semaphore thinks it doesn't need to be submitted (because
        // of an earlier shouldSignal/Wait query) when it actually does.
        for (int i = 0; i < signalCount; ++i) {
            signalSemaphores[i]->markAsSignaled();
        }
        for (int i = 0; i < waitCount; ++i) {
            waitSemaphores[i]->markAsWaited();
        }

        GrVkSemaphore::Resource::ReleaseMutex();
    }

    if (GrVkGpu::kForce_SyncQueue == sync) {
        err = GR_VK_CALL(gpu->vkInterface(),
                         WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
        if (VK_TIMEOUT == err) {
            SkDebugf("Fence failed to signal: %d\n", err);
            SK_ABORT("failing");
        }
        SkASSERT(!err);

        // Destroy the fence
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
    }
}
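
// Returns true once the most recent submission's fence has signaled (or if nothing was
// ever submitted); aborts on unexpected fence errors.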
bool GrVkPrimaryCommandBuffer::finished(const GrVkGpu* gpu) const {
    SkASSERT(!fIsActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err = GR_VK_CALL(gpu->vkInterface(), GetFenceStatus(gpu->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SkDebugf("Error getting fence status: %d\n", err);
            SK_ABORT("failing");
            break;
    }

    return false;
}

void GrVkPrimaryCommandBuffer::onReleaseResources(GrVkGpu* gpu) {
    for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
        fSecondaryCommandBuffers[i]->releaseResources(gpu);
    }
}

void GrVkPrimaryCommandBuffer::recycleSecondaryCommandBuffers() {
    for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
        SkASSERT(fSecondaryCommandBuffers[i]->commandPool() == fCmdPool);
        fCmdPool->recycleSecondaryCommandBuffer(fSecondaryCommandBuffers[i]);
    }
    fSecondaryCommandBuffers.reset();
}

void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
                                         GrVkImage* srcImage,
                                         VkImageLayout srcLayout,
                                         GrVkImage* dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t copyRegionCount,
                                         const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcImage->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
                                                srcImage->image(),
                                                srcLayout,
                                                dstImage->image(),
                                                dstLayout,
                                                copyRegionCount,
                                                copyRegions));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkResource* srcResource,
                                         VkImage srcImage,
                                         VkImageLayout srcLayout,
                                         const GrVkResource* dstResource,
                                         VkImage dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcResource);
    this->addResource(dstResource);
    GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
                                                srcImage,
                                                srcLayout,
                                                dstImage,
                                                dstLayout,
                                                blitRegionCount,
                                                blitRegions,
                                                filter));
}
663
Greg Daniel6ecc9112017-06-16 16:17:03 +0000664void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
665 const GrVkImage& srcImage,
666 const GrVkImage& dstImage,
667 uint32_t blitRegionCount,
668 const VkImageBlit* blitRegions,
669 VkFilter filter) {
670 this->blitImage(gpu,
671 srcImage.resource(),
672 srcImage.image(),
673 srcImage.currentLayout(),
674 dstImage.resource(),
675 dstImage.image(),
676 dstImage.currentLayout(),
677 blitRegionCount,
678 blitRegions,
679 filter);
680}
681
682

void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                 GrVkImage* srcImage,
                                                 VkImageLayout srcLayout,
                                                 GrVkTransferBuffer* dstBuffer,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcImage->resource());
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                        srcImage->image(),
                                                        srcLayout,
                                                        dstBuffer->buffer(),
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                 GrVkTransferBuffer* srcBuffer,
                                                 GrVkImage* dstImage,
                                                 VkImageLayout dstLayout,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcBuffer->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
                                                        srcBuffer->buffer(),
                                                        dstImage->image(),
                                                        dstLayout,
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
                                          GrVkBuffer* srcBuffer,
                                          GrVkBuffer* dstBuffer,
                                          uint32_t regionCount,
                                          const VkBufferCopy* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
#ifdef SK_DEBUG
    for (uint32_t i = 0; i < regionCount; ++i) {
        const VkBufferCopy& region = regions[i];
        SkASSERT(region.size > 0);
        SkASSERT(region.srcOffset < srcBuffer->size());
        SkASSERT(region.dstOffset < dstBuffer->size());
        SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
        SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
    }
#endif
    this->addResource(srcBuffer->resource());
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
                                                 srcBuffer->buffer(),
                                                 dstBuffer->buffer(),
                                                 regionCount,
                                                 regions));
}

void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
                                            GrVkBuffer* dstBuffer,
                                            VkDeviceSize dstOffset,
                                            VkDeviceSize dataSize,
                                            const void* data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(0 == (dstOffset & 0x03));  // four byte aligned
    // TODO: handle larger transfer sizes
    SkASSERT(dataSize <= 65536);
    SkASSERT(0 == (dataSize & 0x03));  // four byte aligned
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdUpdateBuffer(fCmdBuffer,
                                                   dstBuffer->buffer(),
                                                   dstOffset,
                                                   dataSize,
                                                   (const uint32_t*) data));
}

void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                               GrVkImage* image,
                                               const VkClearColorValue* color,
                                               uint32_t subRangeCount,
                                               const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
                                                      image->image(),
                                                      image->currentLayout(),
                                                      color,
                                                      subRangeCount,
                                                      subRanges));
}

void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                      GrVkImage* image,
                                                      const VkClearDepthStencilValue* color,
                                                      uint32_t subRangeCount,
                                                      const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
                                                             image->image(),
                                                             image->currentLayout(),
                                                             color,
                                                             subRangeCount,
                                                             subRanges));
}

void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
                                            const GrVkImage& srcImage,
                                            const GrVkImage& dstImage,
                                            uint32_t regionCount,
                                            const VkImageResolve* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->addResource(srcImage.resource());
    this->addResource(dstImage.resource());

    GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
                                                   srcImage.image(),
                                                   srcImage.currentLayout(),
                                                   dstImage.image(),
                                                   dstImage.currentLayout(),
                                                   regionCount,
                                                   regions));
}

void GrVkPrimaryCommandBuffer::onFreeGPUData(GrVkGpu* gpu) const {
    SkASSERT(!fActiveRenderPass);
    // Destroy the fence, if any
    if (VK_NULL_HANDLE != fSubmitFence) {
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
    }
    for (GrVkSecondaryCommandBuffer* buffer : fSecondaryCommandBuffers) {
        buffer->unref(gpu);
    }
}

void GrVkPrimaryCommandBuffer::onAbandonGPUData() const {
    SkASSERT(!fActiveRenderPass);
    for (GrVkSecondaryCommandBuffer* buffer : fSecondaryCommandBuffers) {
        buffer->unrefAndAbandon();
    }
}

///////////////////////////////////////////////////////////////////////////////
// SecondaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(const GrVkGpu* gpu,
                                                               GrVkCommandPool* cmdPool) {
    SkASSERT(cmdPool);
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        nullptr,                                          // pNext
        cmdPool->vkCommandPool(),                         // commandPool
        VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
                                                                         &cmdInfo,
                                                                         &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkSecondaryCommandBuffer(cmdBuffer, cmdPool);
}

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(VkCommandBuffer cmdBuffer) {
    return new GrVkSecondaryCommandBuffer(cmdBuffer, nullptr);
}
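
// A secondary command buffer begun with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT
// must describe the render pass (and optionally the framebuffer) it will execute inside
// via VkCommandBufferInheritanceInfo, which is why begin() takes a compatible render pass.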
void GrVkSecondaryCommandBuffer::begin(const GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
                                       const GrVkRenderPass* compatibleRenderPass) {
    SkASSERT(!fIsActive);
    SkASSERT(compatibleRenderPass);
    fActiveRenderPass = compatibleRenderPass;

    if (!this->isWrapped()) {
        VkCommandBufferInheritanceInfo inheritanceInfo;
        memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
        inheritanceInfo.pNext = nullptr;
        inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
        inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
        inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
        inheritanceInfo.occlusionQueryEnable = false;
        inheritanceInfo.queryFlags = 0;
        inheritanceInfo.pipelineStatistics = 0;

        VkCommandBufferBeginInfo cmdBufferBeginInfo;
        memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        cmdBufferBeginInfo.pNext = nullptr;
        cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
                                   VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;

        GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
                                                                   &cmdBufferBeginInfo));
    }
    fIsActive = true;
}

void GrVkSecondaryCommandBuffer::end(GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    if (!this->isWrapped()) {
        GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
    }
    this->invalidateState();
    fIsActive = false;
}