blob: 8a097f56925196d1211de07dfdd1a970f57941f3 [file] [log] [blame]
Chia-I Wu525c6602014-08-27 10:22:34 +08001/*
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -06002 * Vulkan
Chia-I Wu525c6602014-08-27 10:22:34 +08003 *
4 * Copyright (C) 2014 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
Chia-I Wu44e42362014-09-02 08:32:09 +080023 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
Chia-I Wu525c6602014-08-27 10:22:34 +080026 */
27
28#include "genhw/genhw.h"
29#include "img.h"
Chia-I Wu714df452015-01-01 07:55:04 +080030#include "buf.h"
Chia-I Wu525c6602014-08-27 10:22:34 +080031#include "cmd_priv.h"
32
/* Operation classes a given image layout permits (see img_get_layout_ops). */
enum {
    READ_OP = 1 << 0,
    WRITE_OP = 1 << 1,
    HIZ_OP = 1 << 2,    /* layout uses the HiZ buffer; drives depth resolves */
};

/* Hardware caches a given image layout may read through or dirty
 * (see img_get_layout_caches and cmd_get_flush_flags).
 */
enum {
    MEM_CACHE = 1 << 0,
    DATA_READ_CACHE = 1 << 1,
    DATA_WRITE_CACHE = 1 << 2,
    RENDER_CACHE = 1 << 3,
    SAMPLER_CACHE = 1 << 4,
};
46
Chia-I Wuc45db532015-02-19 11:20:38 -070047static uint32_t img_get_layout_ops(const struct intel_img *img,
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -060048 VkImageLayout layout)
Chia-I Wuc45db532015-02-19 11:20:38 -070049{
50 uint32_t ops;
51
Chia-I Wu5b66aa52015-04-16 22:02:10 +080052 switch ((int) layout) {
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060053 case VK_IMAGE_LAYOUT_GENERAL:
Chia-I Wu5b66aa52015-04-16 22:02:10 +080054 case VK_IMAGE_LAYOUT_PRESENT_SOURCE_WSI:
Chia-I Wuc45db532015-02-19 11:20:38 -070055 ops = READ_OP | WRITE_OP;
56 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060057 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070058 ops = READ_OP | WRITE_OP;
59 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060060 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070061 ops = READ_OP | WRITE_OP | HIZ_OP;
62 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060063 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070064 ops = READ_OP | HIZ_OP;
65 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060066 case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070067 ops = READ_OP;
68 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060069 case VK_IMAGE_LAYOUT_CLEAR_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070070 ops = WRITE_OP | HIZ_OP;
71 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060072 case VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070073 ops = READ_OP;
74 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060075 case VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL:
Chia-I Wuc45db532015-02-19 11:20:38 -070076 ops = WRITE_OP;
77 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060078 case VK_IMAGE_LAYOUT_UNDEFINED:
Chia-I Wuc45db532015-02-19 11:20:38 -070079 default:
80 ops = 0;
81 break;
82 }
83
84 return ops;
85}
86
Mike Stroyan55658c22014-12-04 11:08:39 +000087static uint32_t img_get_layout_caches(const struct intel_img *img,
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -060088 VkImageLayout layout)
Chia-I Wu525c6602014-08-27 10:22:34 +080089{
90 uint32_t caches;
91
Chia-I Wu5b66aa52015-04-16 22:02:10 +080092 switch ((int) layout) {
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060093 case VK_IMAGE_LAYOUT_GENERAL:
Chia-I Wu5b66aa52015-04-16 22:02:10 +080094 case VK_IMAGE_LAYOUT_PRESENT_SOURCE_WSI:
Mike Stroyan55658c22014-12-04 11:08:39 +000095 // General layout when image can be used for any kind of access
96 caches = MEM_CACHE | DATA_READ_CACHE | DATA_WRITE_CACHE | RENDER_CACHE | SAMPLER_CACHE;
Chia-I Wub5c1cdf2014-11-22 03:17:45 +080097 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -060098 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +000099 // Optimal layout when image is only used for color attachment read/write
100 caches = DATA_WRITE_CACHE | RENDER_CACHE;
Chia-I Wu525c6602014-08-27 10:22:34 +0800101 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600102 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000103 // Optimal layout when image is only used for depth/stencil attachment read/write
104 caches = DATA_WRITE_CACHE | RENDER_CACHE;
Chia-I Wu525c6602014-08-27 10:22:34 +0800105 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600106 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000107 // Optimal layout when image is used for read only depth/stencil attachment and shader access
Chia-I Wu525c6602014-08-27 10:22:34 +0800108 caches = RENDER_CACHE;
109 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600110 case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000111 // Optimal layout when image is used for read only shader access
112 caches = DATA_READ_CACHE | SAMPLER_CACHE;
113 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600114 case VK_IMAGE_LAYOUT_CLEAR_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000115 // Optimal layout when image is used only for clear operations
116 caches = RENDER_CACHE;
117 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600118 case VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000119 // Optimal layout when image is used only as source of transfer operations
120 caches = MEM_CACHE | DATA_READ_CACHE | RENDER_CACHE | SAMPLER_CACHE;
121 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600122 case VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL:
Mike Stroyan55658c22014-12-04 11:08:39 +0000123 // Optimal layout when image is used only as destination of transfer operations
124 caches = MEM_CACHE | DATA_WRITE_CACHE | RENDER_CACHE;
Chia-I Wu525c6602014-08-27 10:22:34 +0800125 break;
126 default:
127 caches = 0;
128 break;
129 }
130
131 return caches;
132}
133
Chia-I Wuc45db532015-02-19 11:20:38 -0700134static void cmd_resolve_depth(struct intel_cmd *cmd,
135 struct intel_img *img,
Courtney Goeltzenleuchter382489d2015-04-10 08:34:15 -0600136 VkImageLayout old_layout,
137 VkImageLayout new_layout,
138 const VkImageSubresourceRange *range)
Chia-I Wuc45db532015-02-19 11:20:38 -0700139{
140 const uint32_t old_ops = img_get_layout_ops(img, old_layout);
141 const uint32_t new_ops = img_get_layout_ops(img, new_layout);
142
143 if (old_ops & WRITE_OP) {
144 if ((old_ops & HIZ_OP) && !(new_ops & HIZ_OP))
145 cmd_meta_ds_op(cmd, INTEL_CMD_META_DS_RESOLVE, img, range);
146 else if (!(old_ops & HIZ_OP) && (new_ops & HIZ_OP))
147 cmd_meta_ds_op(cmd, INTEL_CMD_META_DS_HIZ_RESOLVE, img, range);
148 }
149}
150
/* Compute the PIPE_CONTROL flags needed when an image transitions from
 * caches \p old_caches to caches \p new_caches.  Returns 0 when nothing
 * needs to be flushed or invalidated; otherwise the flush/invalidate bits
 * plus GEN6_PIPE_CONTROL_CS_STALL.
 *
 * \p is_ds selects the depth cache flush instead of the render cache flush.
 */
static uint32_t cmd_get_flush_flags(const struct intel_cmd *cmd,
                                    uint32_t old_caches,
                                    uint32_t new_caches,
                                    bool is_ds)
{
    uint32_t flags = 0;

    /* not dirty */
    if (!(old_caches & (MEM_CACHE | RENDER_CACHE | DATA_WRITE_CACHE)))
        return 0;

    /* render cache was written and some other cache will read: flush it */
    if ((old_caches & RENDER_CACHE) && (new_caches & ~RENDER_CACHE)) {
        if (is_ds)
            flags |= GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH;
        else
            flags |= GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH;
    }

    /* data cache was written and a non-data cache will read */
    if ((old_caches & DATA_WRITE_CACHE) &&
        (new_caches & ~(DATA_READ_CACHE | DATA_WRITE_CACHE))) {
        /* DC flush flag only exists on Gen7+; presumably Gen6 needs no
         * explicit DC flush here -- TODO confirm against the PRM */
        if (cmd_gen(cmd) >= INTEL_GEN(7))
            flags |= GEN7_PIPE_CONTROL_DC_FLUSH;
    }

    if (new_caches & SAMPLER_CACHE)
        flags |= GEN6_PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;

    /* NOTE(review): this is an exact equality test, not a bit test --
     * the constant cache is invalidated unless the old state was *only*
     * DATA_WRITE_CACHE.  Looks intentional but worth confirming; a bit
     * test (!(old_caches & DATA_WRITE_CACHE)) would behave differently. */
    if ((new_caches & DATA_READ_CACHE) && old_caches != DATA_WRITE_CACHE)
        flags |= GEN6_PIPE_CONTROL_CONSTANT_CACHE_INVALIDATE;

    if (!flags)
        return 0;

    /* any cache maintenance requires a CS stall to order it */
    flags |= GEN6_PIPE_CONTROL_CS_STALL;

    return flags;
}
188
/* Accumulate the PIPE_CONTROL flags implied by an array of memory barriers
 * on top of the caller-supplied \p flush_flags, then emit one batch flush.
 *
 * \p memory_barriers points at \p memory_barrier_count structures, each of
 * which is a VkMemoryBarrier, VkBufferMemoryBarrier, or VkImageMemoryBarrier
 * discriminated by its leading VkStructureType field.
 */
static void cmd_memory_barriers(struct intel_cmd *cmd,
                                uint32_t flush_flags,
                                uint32_t memory_barrier_count,
                                const void** memory_barriers)
{
    uint32_t i;
    VkFlags input_mask = 0;
    VkFlags output_mask = 0;

    for (i = 0; i < memory_barrier_count; i++) {

        /* all three barrier structs begin with a VkStructureType, so this
         * union reads the discriminator first, then the matching view */
        const union {
            VkStructureType type;

            VkMemoryBarrier mem;
            VkBufferMemoryBarrier buf;
            VkImageMemoryBarrier img;
        } *u = memory_barriers[i];

        switch(u->type)
        {
        case VK_STRUCTURE_TYPE_MEMORY_BARRIER:
            output_mask |= u->mem.outputMask;
            input_mask |= u->mem.inputMask;
            break;
        case VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER:
            output_mask |= u->buf.outputMask;
            input_mask |= u->buf.inputMask;
            break;
        case VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER:
            output_mask |= u->img.outputMask;
            input_mask |= u->img.inputMask;
            {
                struct intel_img *img = intel_img(u->img.image);

                /* keep HiZ/depth consistent across the layout change */
                cmd_resolve_depth(cmd, img, u->img.oldLayout,
                        u->img.newLayout, &u->img.subresourceRange);

                /* add the flushes the layout transition itself requires */
                flush_flags |= cmd_get_flush_flags(cmd,
                        img_get_layout_caches(img, u->img.oldLayout),
                        img_get_layout_caches(img, u->img.newLayout),
                        icd_format_is_ds(img->layout.format));
            }
            break;
        default:
            /* unknown structure types are silently ignored */
            break;
        }
    }

    /* translate accumulated output (write) masks into cache flushes */
    if (output_mask & VK_MEMORY_OUTPUT_SHADER_WRITE_BIT) {
        flush_flags |= GEN7_PIPE_CONTROL_DC_FLUSH;
    }
    if (output_mask & VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT) {
        flush_flags |= GEN6_PIPE_CONTROL_RENDER_CACHE_FLUSH;
    }
    if (output_mask & VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT) {
        flush_flags |= GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH;
    }

    /* CPU write is cache coherent, so VK_MEMORY_OUTPUT_CPU_WRITE_BIT needs no flush. */
    /* Meta handles flushes, so VK_MEMORY_OUTPUT_TRANSFER_BIT needs no flush. */

    /* translate accumulated input (read) masks into cache invalidates */
    if (input_mask & (VK_MEMORY_INPUT_SHADER_READ_BIT | VK_MEMORY_INPUT_UNIFORM_READ_BIT)) {
        flush_flags |= GEN6_PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
    }

    if (input_mask & VK_MEMORY_INPUT_UNIFORM_READ_BIT) {
        flush_flags |= GEN6_PIPE_CONTROL_CONSTANT_CACHE_INVALIDATE;
    }

    if (input_mask & VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT) {
        flush_flags |= GEN6_PIPE_CONTROL_VF_CACHE_INVALIDATE;
    }

    /* These bits have no corresponding cache invalidate operation.
     * VK_MEMORY_INPUT_CPU_READ_BIT
     * VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT
     * VK_MEMORY_INPUT_INDEX_FETCH_BIT
     * VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT
     * VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT
     * VK_MEMORY_INPUT_TRANSFER_BIT
     */

    /* emit everything as a single PIPE_CONTROL-based flush */
    cmd_batch_flush(cmd, flush_flags);
}
274
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600275ICD_EXPORT void VKAPI vkCmdWaitEvents(
Tony Barbour8205d902015-04-16 15:59:00 -0600276 VkCmdBuffer cmdBuffer,
277 VkWaitEvent waitEvent,
278 uint32_t eventCount,
279 const VkEvent* pEvents,
280 uint32_t memBarrierCount,
281 const void** ppMemBarriers)
Chia-I Wu525c6602014-08-27 10:22:34 +0800282{
283 struct intel_cmd *cmd = intel_cmd(cmdBuffer);
Chia-I Wu525c6602014-08-27 10:22:34 +0800284
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600285 /* This hardware will always wait at VK_WAIT_EVENT_TOP_OF_PIPE.
286 * Passing a pWaitInfo->waitEvent of VK_WAIT_EVENT_BEFORE_FRAGMENT_PROCESSING
Mike Stroyan55658c22014-12-04 11:08:39 +0000287 * does not change that.
288 */
Chia-I Wu525c6602014-08-27 10:22:34 +0800289
Mike Stroyan55658c22014-12-04 11:08:39 +0000290 /* Because the command buffer is serialized, reaching
291 * a pipelined wait is always after completion of prior events.
292 * pWaitInfo->pEvents need not be examined.
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600293 * vkCmdWaitEvents is equivalent to memory barrier part of vkCmdPipelineBarrier.
Mike Stroyan55658c22014-12-04 11:08:39 +0000294 * cmd_memory_barriers will wait for GEN6_PIPE_CONTROL_CS_STALL and perform
295 * appropriate cache control.
296 */
297 cmd_memory_barriers(cmd,
298 GEN6_PIPE_CONTROL_CS_STALL,
Tony Barbour8205d902015-04-16 15:59:00 -0600299 memBarrierCount, ppMemBarriers);
Mike Stroyan55658c22014-12-04 11:08:39 +0000300}
301
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600302ICD_EXPORT void VKAPI vkCmdPipelineBarrier(
Tony Barbour8205d902015-04-16 15:59:00 -0600303 VkCmdBuffer cmdBuffer,
304 VkWaitEvent waitEvent,
305 uint32_t pipeEventCount,
306 const VkPipeEvent* pPipeEvents,
307 uint32_t memBarrierCount,
308 const void** ppMemBarriers)
Mike Stroyan55658c22014-12-04 11:08:39 +0000309{
310 struct intel_cmd *cmd = intel_cmd(cmdBuffer);
311 uint32_t pipe_control_flags = 0;
312 uint32_t i;
313
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600314 /* This hardware will always wait at VK_WAIT_EVENT_TOP_OF_PIPE.
315 * Passing a pBarrier->waitEvent of VK_WAIT_EVENT_BEFORE_FRAGMENT_PROCESSING
Mike Stroyan55658c22014-12-04 11:08:39 +0000316 * does not change that.
317 */
318
319 /* Cache control is done with PIPE_CONTROL flags.
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600320 * With no GEN6_PIPE_CONTROL_CS_STALL flag set, it behaves as VK_PIPE_EVENT_TOP_OF_PIPE.
Tony Barbour8205d902015-04-16 15:59:00 -0600321 * All other pEvents values will behave as VK_PIPE_EVENT_COMMANDS_COMPLETE.
Mike Stroyan55658c22014-12-04 11:08:39 +0000322 */
Tony Barbour8205d902015-04-16 15:59:00 -0600323 for (i = 0; i < pipeEventCount; i++) {
324 switch(pPipeEvents[i])
Mike Stroyan55658c22014-12-04 11:08:39 +0000325 {
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600326 case VK_PIPE_EVENT_TOP_OF_PIPE:
Mike Stroyan55658c22014-12-04 11:08:39 +0000327 break;
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600328 case VK_PIPE_EVENT_VERTEX_PROCESSING_COMPLETE:
329 case VK_PIPE_EVENT_LOCAL_FRAGMENT_PROCESSING_COMPLETE:
330 case VK_PIPE_EVENT_FRAGMENT_PROCESSING_COMPLETE:
331 case VK_PIPE_EVENT_GRAPHICS_PIPELINE_COMPLETE:
332 case VK_PIPE_EVENT_COMPUTE_PIPELINE_COMPLETE:
333 case VK_PIPE_EVENT_TRANSFER_COMPLETE:
Tony Barbour8205d902015-04-16 15:59:00 -0600334 case VK_PIPE_EVENT_COMMANDS_COMPLETE:
Mike Stroyan55658c22014-12-04 11:08:39 +0000335 pipe_control_flags |= GEN6_PIPE_CONTROL_CS_STALL;
336 break;
337 default:
Courtney Goeltzenleuchter9cc421e2015-04-08 15:36:08 -0600338 cmd_fail(cmd, VK_ERROR_UNKNOWN);
Mike Stroyan55658c22014-12-04 11:08:39 +0000339 return;
340 break;
341 }
Chia-I Wu525c6602014-08-27 10:22:34 +0800342 }
343
Mike Stroyan55658c22014-12-04 11:08:39 +0000344 /* cmd_memory_barriers can wait for GEN6_PIPE_CONTROL_CS_STALL and perform
345 * appropriate cache control.
346 */
347 cmd_memory_barriers(cmd,
348 pipe_control_flags,
Tony Barbour8205d902015-04-16 15:59:00 -0600349 memBarrierCount, ppMemBarriers);
Chia-I Wu525c6602014-08-27 10:22:34 +0800350}