/*
 * Vulkan
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Chia-I Wu <olv@lunarg.com>
 */

#include "genhw/genhw.h"
#include "img.h"
#include "buf.h"
#include "cmd_priv.h"
/* Abstract access operations implied by an image layout (see
 * img_get_layout_ops); used to decide whether a depth/HiZ resolve is
 * required on a layout transition.
 */
enum {
    READ_OP = 1 << 0,   /* layout permits reads of the image */
    WRITE_OP = 1 << 1,  /* layout permits writes to the image */
    HIZ_OP = 1 << 2,    /* layout may use the HiZ (hierarchical depth) buffer */
};
38
/* GPU cache domains that may hold image data in a given layout (see
 * img_get_layout_caches); used to compute PIPE_CONTROL flush/invalidate
 * flags when transitioning between layouts.
 */
enum {
    MEM_CACHE = 1 << 0,         /* memory/LLC path */
    DATA_READ_CACHE = 1 << 1,   /* data port read cache */
    DATA_WRITE_CACHE = 1 << 2,  /* data port write cache */
    RENDER_CACHE = 1 << 3,      /* render target (and depth) cache */
    SAMPLER_CACHE = 1 << 4,     /* texture/sampler cache */
};
46
Chia-I Wuc45db532015-02-19 11:20:38 -070047static uint32_t img_get_layout_ops(const struct intel_img *img,
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -060048 VkImageLayout layout)
Chia-I Wuc45db532015-02-19 11:20:38 -070049{
50 uint32_t ops;
51
52 switch (layout) {
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060053 case VK_IMAGE_LAYOUT_GENERAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070054 ops = READ_OP | WRITE_OP;
55 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060056 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070057 ops = READ_OP | WRITE_OP;
58 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060059 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070060 ops = READ_OP | WRITE_OP | HIZ_OP;
61 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060062 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070063 ops = READ_OP | HIZ_OP;
64 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060065 case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070066 ops = READ_OP;
67 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060068 case VK_IMAGE_LAYOUT_CLEAR_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070069 ops = WRITE_OP | HIZ_OP;
70 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060071 case VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070072 ops = READ_OP;
73 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060074 case VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070075 ops = WRITE_OP;
76 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060077 case VK_IMAGE_LAYOUT_UNDEFINED:
Chia-I Wuc45db532015-02-19 11:20:38 -070078 default:
79 ops = 0;
80 break;
81 }
82
83 return ops;
84}
85
Mike Stroyan55658c22014-12-04 11:08:39 +000086static uint32_t img_get_layout_caches(const struct intel_img *img,
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -060087 VkImageLayout layout)
Chia-I Wu525c6602014-08-27 10:22:34 +080088{
89 uint32_t caches;
90
Mike Stroyan55658c22014-12-04 11:08:39 +000091 switch (layout) {
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060092 case VK_IMAGE_LAYOUT_GENERAL:
Mike Stroyan55658c22014-12-04 11:08:39 +000093 // General layout when image can be used for any kind of access
94 caches = MEM_CACHE | DATA_READ_CACHE | DATA_WRITE_CACHE | RENDER_CACHE | SAMPLER_CACHE;
Chia-I Wub5c1cdf2014-11-22 03:17:45 +080095 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060096 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +000097 // Optimal layout when image is only used for color attachment read/write
98 caches = DATA_WRITE_CACHE | RENDER_CACHE;
Chia-I Wu525c6602014-08-27 10:22:34 +080099 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600100 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000101 // Optimal layout when image is only used for depth/stencil attachment read/write
102 caches = DATA_WRITE_CACHE | RENDER_CACHE;
Chia-I Wu525c6602014-08-27 10:22:34 +0800103 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600104 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000105 // Optimal layout when image is used for read only depth/stencil attachment and shader access
Chia-I Wu525c6602014-08-27 10:22:34 +0800106 caches = RENDER_CACHE;
107 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600108 case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000109 // Optimal layout when image is used for read only shader access
110 caches = DATA_READ_CACHE | SAMPLER_CACHE;
111 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600112 case VK_IMAGE_LAYOUT_CLEAR_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000113 // Optimal layout when image is used only for clear operations
114 caches = RENDER_CACHE;
115 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600116 case VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000117 // Optimal layout when image is used only as source of transfer operations
118 caches = MEM_CACHE | DATA_READ_CACHE | RENDER_CACHE | SAMPLER_CACHE;
119 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600120 case VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000121 // Optimal layout when image is used only as destination of transfer operations
122 caches = MEM_CACHE | DATA_WRITE_CACHE | RENDER_CACHE;
Chia-I Wu525c6602014-08-27 10:22:34 +0800123 break;
124 default:
125 caches = 0;
126 break;
127 }
128
129 return caches;
130}
131
Chia-I Wuc45db532015-02-19 11:20:38 -0700132static void cmd_resolve_depth(struct intel_cmd *cmd,
133 struct intel_img *img,
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -0600134 VkImageLayout old_layout,
135 VkImageLayout new_layout,
136 const VkImageSubresourceRange *range)
Chia-I Wuc45db532015-02-19 11:20:38 -0700137{
138 const uint32_t old_ops = img_get_layout_ops(img, old_layout);
139 const uint32_t new_ops = img_get_layout_ops(img, new_layout);
140
141 if (old_ops & WRITE_OP) {
142 if ((old_ops & HIZ_OP) && !(new_ops & HIZ_OP))
143 cmd_meta_ds_op(cmd, INTEL_CMD_META_DS_RESOLVE, img, range);
144 else if (!(old_ops & HIZ_OP) && (new_ops & HIZ_OP))
145 cmd_meta_ds_op(cmd, INTEL_CMD_META_DS_HIZ_RESOLVE, img, range);
146 }
147}
148
Chia-I Wub5c1cdf2014-11-22 03:17:45 +0800149static uint32_t cmd_get_flush_flags(const struct intel_cmd *cmd,
150 uint32_t old_caches,
151 uint32_t new_caches,
152 bool is_ds)
Chia-I Wu525c6602014-08-27 10:22:34 +0800153{
154 uint32_t flags = 0;
155
156 /* not dirty */
157 if (!(old_caches & (MEM_CACHE | RENDER_CACHE | DATA_WRITE_CACHE)))
158 return 0;
159
160 if ((old_caches & RENDER_CACHE) && (new_caches & ~RENDER_CACHE)) {
Chia-I Wub5c1cdf2014-11-22 03:17:45 +0800161 if (is_ds)
Chia-I Wu525c6602014-08-27 10:22:34 +0800162 flags |= GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH;
163 else
164 flags |= GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH;
165 }
166
167 if ((old_caches & DATA_WRITE_CACHE) &&
168 (new_caches & ~(DATA_READ_CACHE | DATA_WRITE_CACHE))) {
169 if (cmd_gen(cmd) >= INTEL_GEN(7))
Chia-I Wu97aa4de2015-03-05 15:43:16 -0700170 flags |= GEN7_PIPE_CONTROL_DC_FLUSH;
Chia-I Wu525c6602014-08-27 10:22:34 +0800171 }
172
173 if (new_caches & SAMPLER_CACHE)
174 flags |= GEN6_PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
175
176 if ((new_caches & DATA_READ_CACHE) && old_caches != DATA_WRITE_CACHE)
177 flags |= GEN6_PIPE_CONTROL_CONSTANT_CACHE_INVALIDATE;
178
179 if (!flags)
180 return 0;
181
182 flags |= GEN6_PIPE_CONTROL_CS_STALL;
183
184 return flags;
185}
186
Mike Stroyan55658c22014-12-04 11:08:39 +0000187static void cmd_memory_barriers(struct intel_cmd *cmd,
Mark Lobodzinskid3eabd72015-01-29 14:24:14 -0600188 uint32_t flush_flags,
Mark Lobodzinskie2d07a52015-01-29 08:55:56 -0600189 uint32_t memory_barrier_count,
Mark Lobodzinskid3eabd72015-01-29 14:24:14 -0600190 const void** memory_barriers)
Chia-I Wu525c6602014-08-27 10:22:34 +0800191{
Mike Stroyan55658c22014-12-04 11:08:39 +0000192 uint32_t i;
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -0600193 VkFlags input_mask = 0;
194 VkFlags output_mask = 0;
Chia-I Wu525c6602014-08-27 10:22:34 +0800195
Mike Stroyan55658c22014-12-04 11:08:39 +0000196 for (i = 0; i < memory_barrier_count; i++) {
Mark Lobodzinskid3eabd72015-01-29 14:24:14 -0600197
198 const union {
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -0600199 VkStructureType type;
Mark Lobodzinskid3eabd72015-01-29 14:24:14 -0600200
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -0600201 VkMemoryBarrier mem;
202 VkBufferMemoryBarrier buf;
203 VkImageMemoryBarrier img;
Mark Lobodzinskid3eabd72015-01-29 14:24:14 -0600204 } *u = memory_barriers[i];
205
206 switch(u->type)
Mike Stroyan55658c22014-12-04 11:08:39 +0000207 {
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600208 case VK_STRUCTURE_TYPE_MEMORY_BARRIER:
Mark Lobodzinskid3eabd72015-01-29 14:24:14 -0600209 output_mask |= u->mem.outputMask;
210 input_mask |= u->mem.inputMask;
Mike Stroyan55658c22014-12-04 11:08:39 +0000211 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600212 case VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER:
Mark Lobodzinskid3eabd72015-01-29 14:24:14 -0600213 output_mask |= u->buf.outputMask;
214 input_mask |= u->buf.inputMask;
Mike Stroyan55658c22014-12-04 11:08:39 +0000215 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600216 case VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER:
Mark Lobodzinskid3eabd72015-01-29 14:24:14 -0600217 output_mask |= u->img.outputMask;
218 input_mask |= u->img.inputMask;
Mike Stroyan55658c22014-12-04 11:08:39 +0000219 {
Mark Lobodzinskid3eabd72015-01-29 14:24:14 -0600220 struct intel_img *img = intel_img(u->img.image);
Chia-I Wuc45db532015-02-19 11:20:38 -0700221
222 cmd_resolve_depth(cmd, img, u->img.oldLayout,
223 u->img.newLayout, &u->img.subresourceRange);
224
Mike Stroyan55658c22014-12-04 11:08:39 +0000225 flush_flags |= cmd_get_flush_flags(cmd,
Mark Lobodzinskid3eabd72015-01-29 14:24:14 -0600226 img_get_layout_caches(img, u->img.oldLayout),
227 img_get_layout_caches(img, u->img.newLayout),
Jeremy Hayes2b7e88a2015-01-23 08:51:43 -0700228 icd_format_is_ds(img->layout.format));
Mike Stroyan55658c22014-12-04 11:08:39 +0000229 }
230 break;
231 default:
232 break;
233 }
Chia-I Wu525c6602014-08-27 10:22:34 +0800234 }
235
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600236 if (output_mask & VK_MEMORY_OUTPUT_SHADER_WRITE_BIT) {
Chia-I Wu97aa4de2015-03-05 15:43:16 -0700237 flush_flags |= GEN7_PIPE_CONTROL_DC_FLUSH;
Mike Stroyan55658c22014-12-04 11:08:39 +0000238 }
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600239 if (output_mask & VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT) {
Mike Stroyan55658c22014-12-04 11:08:39 +0000240 flush_flags |= GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH;
241 }
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600242 if (output_mask & VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT) {
Mike Stroyan55658c22014-12-04 11:08:39 +0000243 flush_flags |= GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH;
244 }
245
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600246 /* CPU write is cache coherent, so VK_MEMORY_OUTPUT_CPU_WRITE_BIT needs no flush. */
Courtney Goeltzenleuchterad870812015-04-15 15:29:59 -0600247 /* Meta handles flushes, so VK_MEMORY_OUTPUT_TRANSFER_BIT needs no flush. */
Mike Stroyan55658c22014-12-04 11:08:39 +0000248
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600249 if (input_mask & (VK_MEMORY_INPUT_SHADER_READ_BIT | VK_MEMORY_INPUT_UNIFORM_READ_BIT)) {
Mike Stroyan55658c22014-12-04 11:08:39 +0000250 flush_flags |= GEN6_PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
251 }
252
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600253 if (input_mask & VK_MEMORY_INPUT_UNIFORM_READ_BIT) {
Mike Stroyan55658c22014-12-04 11:08:39 +0000254 flush_flags |= GEN6_PIPE_CONTROL_CONSTANT_CACHE_INVALIDATE;
255 }
256
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600257 if (input_mask & VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT) {
Mike Stroyan55658c22014-12-04 11:08:39 +0000258 flush_flags |= GEN6_PIPE_CONTROL_VF_CACHE_INVALIDATE;
259 }
260
261 /* These bits have no corresponding cache invalidate operation.
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600262 * VK_MEMORY_INPUT_CPU_READ_BIT
263 * VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT
264 * VK_MEMORY_INPUT_INDEX_FETCH_BIT
265 * VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT
266 * VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT
Courtney Goeltzenleuchterad870812015-04-15 15:29:59 -0600267 * VK_MEMORY_INPUT_TRANSFER_BIT
Mike Stroyan55658c22014-12-04 11:08:39 +0000268 */
269
Chia-I Wu525c6602014-08-27 10:22:34 +0800270 cmd_batch_flush(cmd, flush_flags);
271}
272
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600273ICD_EXPORT void VKAPI vkCmdWaitEvents(
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -0600274 VkCmdBuffer cmdBuffer,
275 const VkEventWaitInfo* pWaitInfo)
Chia-I Wu525c6602014-08-27 10:22:34 +0800276{
277 struct intel_cmd *cmd = intel_cmd(cmdBuffer);
Chia-I Wu525c6602014-08-27 10:22:34 +0800278
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600279 /* This hardware will always wait at VK_WAIT_EVENT_TOP_OF_PIPE.
280 * Passing a pWaitInfo->waitEvent of VK_WAIT_EVENT_BEFORE_FRAGMENT_PROCESSING
Mike Stroyan55658c22014-12-04 11:08:39 +0000281 * does not change that.
282 */
Chia-I Wu525c6602014-08-27 10:22:34 +0800283
Mike Stroyan55658c22014-12-04 11:08:39 +0000284 /* Because the command buffer is serialized, reaching
285 * a pipelined wait is always after completion of prior events.
286 * pWaitInfo->pEvents need not be examined.
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600287 * vkCmdWaitEvents is equivalent to memory barrier part of vkCmdPipelineBarrier.
Mike Stroyan55658c22014-12-04 11:08:39 +0000288 * cmd_memory_barriers will wait for GEN6_PIPE_CONTROL_CS_STALL and perform
289 * appropriate cache control.
290 */
291 cmd_memory_barriers(cmd,
292 GEN6_PIPE_CONTROL_CS_STALL,
Mark Lobodzinski628a8a52015-02-02 11:55:52 -0600293 pWaitInfo->memBarrierCount, pWaitInfo->ppMemBarriers);
Mike Stroyan55658c22014-12-04 11:08:39 +0000294}
295
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600296ICD_EXPORT void VKAPI vkCmdPipelineBarrier(
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -0600297 VkCmdBuffer cmdBuffer,
298 const VkPipelineBarrier* pBarrier)
Mike Stroyan55658c22014-12-04 11:08:39 +0000299{
300 struct intel_cmd *cmd = intel_cmd(cmdBuffer);
301 uint32_t pipe_control_flags = 0;
302 uint32_t i;
303
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600304 /* This hardware will always wait at VK_WAIT_EVENT_TOP_OF_PIPE.
305 * Passing a pBarrier->waitEvent of VK_WAIT_EVENT_BEFORE_FRAGMENT_PROCESSING
Mike Stroyan55658c22014-12-04 11:08:39 +0000306 * does not change that.
307 */
308
309 /* Cache control is done with PIPE_CONTROL flags.
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600310 * With no GEN6_PIPE_CONTROL_CS_STALL flag set, it behaves as VK_PIPE_EVENT_TOP_OF_PIPE.
311 * All other pEvents values will behave as VK_PIPE_EVENT_GPU_COMMANDS_COMPLETE.
Mike Stroyan55658c22014-12-04 11:08:39 +0000312 */
313 for (i = 0; i < pBarrier->eventCount; i++) {
314 switch(pBarrier->pEvents[i])
315 {
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600316 case VK_PIPE_EVENT_TOP_OF_PIPE:
Mike Stroyan55658c22014-12-04 11:08:39 +0000317 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600318 case VK_PIPE_EVENT_VERTEX_PROCESSING_COMPLETE:
319 case VK_PIPE_EVENT_LOCAL_FRAGMENT_PROCESSING_COMPLETE:
320 case VK_PIPE_EVENT_FRAGMENT_PROCESSING_COMPLETE:
321 case VK_PIPE_EVENT_GRAPHICS_PIPELINE_COMPLETE:
322 case VK_PIPE_EVENT_COMPUTE_PIPELINE_COMPLETE:
323 case VK_PIPE_EVENT_TRANSFER_COMPLETE:
324 case VK_PIPE_EVENT_GPU_COMMANDS_COMPLETE:
Mike Stroyan55658c22014-12-04 11:08:39 +0000325 pipe_control_flags |= GEN6_PIPE_CONTROL_CS_STALL;
326 break;
327 default:
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600328 cmd_fail(cmd, VK_ERROR_UNKNOWN);
Mike Stroyan55658c22014-12-04 11:08:39 +0000329 return;
330 break;
331 }
Chia-I Wu525c6602014-08-27 10:22:34 +0800332 }
333
Mike Stroyan55658c22014-12-04 11:08:39 +0000334 /* cmd_memory_barriers can wait for GEN6_PIPE_CONTROL_CS_STALL and perform
335 * appropriate cache control.
336 */
337 cmd_memory_barriers(cmd,
338 pipe_control_flags,
Mark Lobodzinski628a8a52015-02-02 11:55:52 -0600339 pBarrier->memBarrierCount, pBarrier->ppMemBarriers);
Chia-I Wu525c6602014-08-27 10:22:34 +0800340}