blob: 8f7690c1eba4daa3bde8efad57404af0d85f124b [file] [log] [blame]
Paul Berry2c5510b2012-04-29 22:00:46 -07001/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
Eric Anholt185b5a52013-06-18 13:52:03 -070024#include <errno.h>
Eric Anholtdb31bc52013-02-07 18:46:18 -080025#include "intel_batchbuffer.h"
Paul Berry2c5510b2012-04-29 22:00:46 -070026#include "intel_fbo.h"
27
28#include "brw_blorp.h"
Jason Ekstrandc1fe8852016-04-27 17:16:30 -070029#include "brw_compiler.h"
30#include "brw_nir.h"
Kenneth Graunke6f7c41d2013-09-30 18:11:03 -070031#include "brw_state.h"
Paul Berry2c5510b2012-04-29 22:00:46 -070032
Eric Anholta2ca98b2013-05-30 14:53:55 -070033#define FILE_DEBUG_FLAG DEBUG_BLORP
34
Paul Berry2c5510b2012-04-29 22:00:46 -070035void
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -070036brw_blorp_surface_info_init(struct brw_context *brw,
37 struct brw_blorp_surface_info *info,
Jason Ekstranda543f742016-04-21 16:19:51 -070038 struct intel_mipmap_tree *mt,
39 unsigned int level, unsigned int layer,
40 mesa_format format, bool is_render_target)
Paul Berry2c5510b2012-04-29 22:00:46 -070041{
Paul Berryb5fe4132013-12-03 21:15:47 -080042 /* Layer is a physical layer, so if this is a 2D multisample array texture
43 * using INTEL_MSAA_LAYOUT_UMS or INTEL_MSAA_LAYOUT_CMS, then it had better
44 * be a multiple of num_samples.
45 */
46 if (mt->msaa_layout == INTEL_MSAA_LAYOUT_UMS ||
47 mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
Topi Pohjolainen01ba26d2016-04-18 08:51:10 +030048 assert(mt->num_samples <= 1 || layer % mt->num_samples == 0);
Paul Berryb5fe4132013-12-03 21:15:47 -080049 }
50
Paul Berry2c5510b2012-04-29 22:00:46 -070051 intel_miptree_check_level_layer(mt, level, layer);
52
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -070053 info->mt = mt;
Jason Ekstrandb82de882016-06-22 15:33:44 -070054
55 intel_miptree_get_isl_surf(brw, mt, &info->surf);
56
57 if (mt->mcs_mt) {
58 intel_miptree_get_aux_isl_surf(brw, mt, &info->aux_surf,
59 &info->aux_usage);
60 } else {
61 info->aux_usage = ISL_AUX_USAGE_NONE;
62 }
63
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -070064 info->level = level;
65 info->layer = layer;
66 info->width = minify(mt->physical_width0, level - mt->first_level);
67 info->height = minify(mt->physical_height0, level - mt->first_level);
Paul Berryc130ce72012-08-29 12:16:06 -070068
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -070069 info->swizzle = SWIZZLE_XYZW;
Paul Berry506d70b2012-04-29 22:44:25 -070070
Jason Ekstrand75998862014-09-03 13:53:33 -070071 if (format == MESA_FORMAT_NONE)
72 format = mt->format;
73
74 switch (format) {
Mark Mueller50a01d22014-01-20 19:08:54 -080075 case MESA_FORMAT_S_UINT8:
Jason Ekstrandaa4117a2016-06-22 16:46:20 -070076 assert(info->surf.tiling == ISL_TILING_W);
77 /* Prior to Broadwell, we can't render to R8_UINT */
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -070078 info->brw_surfaceformat = brw->gen >= 8 ? BRW_SURFACEFORMAT_R8_UINT :
Topi Pohjolainenf7ab4e02016-04-07 18:50:56 +030079 BRW_SURFACEFORMAT_R8_UNORM;
Paul Berry530bda22012-06-06 11:05:02 -070080 break;
Kenneth Graunkea487ef82014-02-07 21:53:18 -080081 case MESA_FORMAT_Z24_UNORM_X8_UINT:
Kenneth Graunke0589eae2013-10-07 11:27:22 -070082 /* It would make sense to use BRW_SURFACEFORMAT_R24_UNORM_X8_TYPELESS
83 * here, but unfortunately it isn't supported as a render target, which
84 * would prevent us from blitting to 24-bit depth.
85 *
86 * The miptree consists of 32 bits per pixel, arranged as 24-bit depth
Kenneth Graunke590d7172013-10-07 11:19:11 -070087 * values interleaved with 8 "don't care" bits. Since depth values don't
88 * require any blending, it doesn't matter how we interpret the bit
89 * pattern as long as we copy the right amount of data, so just map it
90 * as 8-bit BGRA.
Paul Berry530bda22012-06-06 11:05:02 -070091 */
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -070092 info->brw_surfaceformat = BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
Paul Berry530bda22012-06-06 11:05:02 -070093 break;
Mark Mueller50a01d22014-01-20 19:08:54 -080094 case MESA_FORMAT_Z_FLOAT32:
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -070095 info->brw_surfaceformat = BRW_SURFACEFORMAT_R32_FLOAT;
Kenneth Graunke590d7172013-10-07 11:19:11 -070096 break;
Mark Mueller50a01d22014-01-20 19:08:54 -080097 case MESA_FORMAT_Z_UNORM16:
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -070098 info->brw_surfaceformat = BRW_SURFACEFORMAT_R16_UNORM;
Paul Berry530bda22012-06-06 11:05:02 -070099 break;
Kenneth Graunke72aade42013-10-07 12:44:01 -0700100 default: {
Kenneth Graunke6f7c41d2013-09-30 18:11:03 -0700101 if (is_render_target) {
Kenneth Graunke8679bb72016-03-16 20:15:52 -0700102 assert(brw->format_supported_as_render_target[format]);
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -0700103 info->brw_surfaceformat = brw->render_target_format[format];
Kenneth Graunke6f7c41d2013-09-30 18:11:03 -0700104 } else {
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -0700105 info->brw_surfaceformat = brw_format_for_mesa_format(format);
Kenneth Graunke6f7c41d2013-09-30 18:11:03 -0700106 }
Paul Berry530bda22012-06-06 11:05:02 -0700107 break;
Paul Berry506d70b2012-04-29 22:44:25 -0700108 }
Kenneth Graunke72aade42013-10-07 12:44:01 -0700109 }
Jason Ekstrande046a462016-06-23 18:40:08 -0700110
111 uint32_t x_offset, y_offset;
112 intel_miptree_get_image_offset(mt, level, layer, &x_offset, &y_offset);
113
114 uint8_t bs = isl_format_get_layout(info->brw_surfaceformat)->bpb / 8;
115 isl_tiling_get_intratile_offset_el(&brw->isl_dev, info->surf.tiling, bs,
116 info->surf.row_pitch, x_offset, y_offset,
117 &info->bo_offset,
118 &info->tile_x_sa, &info->tile_y_sa);
Paul Berry506d70b2012-04-29 22:44:25 -0700119}
120
Paul Berryf04f2192012-08-29 16:04:15 -0700121
Jason Ekstrandb3f08b52016-04-22 14:32:48 -0700122void
123brw_blorp_params_init(struct brw_blorp_params *params)
Paul Berry2c5510b2012-04-29 22:00:46 -0700124{
Jason Ekstrandb3f08b52016-04-22 14:32:48 -0700125 memset(params, 0, sizeof(*params));
126 params->hiz_op = GEN6_HIZ_OP_NONE;
127 params->fast_clear_op = 0;
Jason Ekstrandb3f08b52016-04-22 14:32:48 -0700128 params->num_draw_buffers = 1;
129 params->num_layers = 1;
Paul Berry2c5510b2012-04-29 22:00:46 -0700130}
131
Jason Ekstrandc1fe8852016-04-27 17:16:30 -0700132void
133brw_blorp_init_wm_prog_key(struct brw_wm_prog_key *wm_key)
134{
135 memset(wm_key, 0, sizeof(*wm_key));
136 wm_key->nr_color_regions = 1;
137 for (int i = 0; i < MAX_SAMPLERS; i++)
138 wm_key->tex.swizzles[i] = SWIZZLE_XYZW;
139}
140
/* Uniform size callback for nir_lower_io: blorp restricts uniforms to
 * 32-bit scalars and vectors, so the size is simply 4 units per component.
 */
static int
nir_uniform_type_size(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type));
   assert(glsl_get_bit_size(type) == 32);

   const unsigned components = glsl_get_vector_elements(type);
   return components * 4;
}
150
/**
 * Compile a NIR fragment shader for use by blorp and return the binary.
 *
 * Takes ownership of \p nir (it is stolen into a local ralloc context and
 * may be replaced wholesale by brw_preprocess_nir).  Fills \p prog_data with
 * the subset of brw_wm_prog_data that blorp's state emission needs and
 * stores the binary size in \p program_size.
 *
 * NOTE(review): the returned program is allocated out of mem_ctx, which is
 * never freed here — presumably the caller uploads the binary and releases
 * it; confirm against the call sites.
 */
const unsigned *
brw_blorp_compile_nir_shader(struct brw_context *brw, struct nir_shader *nir,
                             const struct brw_wm_prog_key *wm_key,
                             bool use_repclear,
                             struct brw_blorp_prog_data *prog_data,
                             unsigned *program_size)
{
   const struct brw_compiler *compiler = brw->intelScreen->compiler;

   void *mem_ctx = ralloc_context(NULL);

   /* Calling brw_preprocess_nir and friends is destructive and, if cloning is
    * enabled, may end up completely replacing the nir_shader.  Therefore, we
    * own it and might as well put it in our context for easy cleanup.
    */
   ralloc_steal(mem_ctx, nir);
   nir->options =
      compiler->glsl_compiler_options[MESA_SHADER_FRAGMENT].NirOptions;

   struct brw_wm_prog_data wm_prog_data;
   memset(&wm_prog_data, 0, sizeof(wm_prog_data));

   /* Blorp shaders carry no push constants by default. */
   wm_prog_data.base.nr_params = 0;
   wm_prog_data.base.param = NULL;

   /* BLORP always just uses the first two binding table entries */
   wm_prog_data.binding_table.render_target_start = 0;
   wm_prog_data.base.binding_table.texture_start = 1;

   nir = brw_preprocess_nir(compiler, nir);
   nir_remove_dead_variables(nir, nir_var_shader_in);
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir)->impl);

   /* Uniforms are required to be lowered before going into compile_fs.  For
    * BLORP, we'll assume that whoever builds the shader sets the location
    * they want so we just need to lower them and figure out how many we have
    * in total.
    */
   nir->num_uniforms = 0;
   nir_foreach_variable(var, &nir->uniforms) {
      var->data.driver_location = var->data.location;
      unsigned end = var->data.location + nir_uniform_type_size(var->type);
      nir->num_uniforms = MAX2(nir->num_uniforms, end);
   }
   nir_lower_io(nir, nir_var_uniform, nir_uniform_type_size);

   const unsigned *program =
      brw_compile_fs(compiler, brw, mem_ctx, wm_key, &wm_prog_data, nir,
                     NULL, -1, -1, false, use_repclear, program_size, NULL);

   /* Copy the relevant bits of wm_prog_data over into the blorp prog data */
   prog_data->dispatch_8 = wm_prog_data.dispatch_8;
   prog_data->dispatch_16 = wm_prog_data.dispatch_16;
   prog_data->first_curbe_grf_0 = wm_prog_data.base.dispatch_grf_start_reg;
   prog_data->first_curbe_grf_2 = wm_prog_data.dispatch_grf_start_reg_2;
   prog_data->ksp_offset_2 = wm_prog_data.prog_offset_2;
   prog_data->persample_msaa_dispatch = wm_prog_data.persample_dispatch;
   prog_data->flat_inputs = wm_prog_data.flat_inputs;
   prog_data->num_varying_inputs = wm_prog_data.num_varying_inputs;
   prog_data->inputs_read = nir->info.inputs_read;

   /* The compile must not have created push constants behind our back. */
   assert(wm_prog_data.base.nr_params == 0);

   return program;
}
216
Jason Ekstrand6553dc02016-06-10 12:03:18 -0700217struct surface_state_info {
218 unsigned num_dwords;
219 unsigned ss_align; /* Required alignment of RENDER_SURFACE_STATE in bytes */
220 unsigned reloc_dw;
221 unsigned aux_reloc_dw;
222 unsigned tex_mocs;
223 unsigned rb_mocs;
224};
225
226static const struct surface_state_info surface_state_infos[] = {
227 [6] = {6, 32, 1, 0},
228 [7] = {8, 32, 1, 6, GEN7_MOCS_L3, GEN7_MOCS_L3},
229 [8] = {13, 64, 8, 10, BDW_MOCS_WB, BDW_MOCS_PTE},
230 [9] = {16, 64, 8, 10, SKL_MOCS_WB, SKL_MOCS_PTE},
231};
232
/**
 * Emit a RENDER_SURFACE_STATE for \p surface into the batch's state area,
 * including the surface-base and (if present) aux-base relocations, and
 * return the state offset.
 *
 * The ISL surface from \p surface is copied and then flattened to a single
 * 2D miplevel/slice, since blorp always addresses exactly one image.
 */
uint32_t
brw_blorp_emit_surface_state(struct brw_context *brw,
                             const struct brw_blorp_surface_info *surface,
                             uint32_t read_domains, uint32_t write_domain,
                             bool is_render_target)
{
   const struct surface_state_info ss_info = surface_state_infos[brw->gen];

   /* Local copy: we mutate dimensions below without touching the original. */
   struct isl_surf surf = surface->surf;

   /* Stomp surface dimensions and tiling (if needed) with info from blorp */
   surf.dim = ISL_SURF_DIM_2D;
   surf.dim_layout = ISL_DIM_LAYOUT_GEN4_2D;
   surf.logical_level0_px.width = surface->width;
   surf.logical_level0_px.height = surface->height;
   surf.logical_level0_px.depth = 1;
   surf.logical_level0_px.array_len = 1;
   surf.levels = 1;

   /* Alignment doesn't matter since we have 1 miplevel and 1 array slice so
    * just pick something that works for everybody.
    */
   surf.image_alignment_el = isl_extent3d(4, 4, 1);

   if (brw->gen == 6 && surf.samples > 1) {
      /* Since gen6 uses INTEL_MSAA_LAYOUT_IMS, width and height are measured
       * in samples.  But SURFACE_STATE wants them in pixels, so we need to
       * divide them each by 2.
       */
      surf.logical_level0_px.width /= 2;
      surf.logical_level0_px.height /= 2;
   }

   /* NOTE(review): this branch appears unreachable — image_alignment_el was
    * unconditionally set to (4, 4, 1) just above, so its height can never
    * exceed 4 here.  Possibly left over from before that assignment existed;
    * confirm and consider removing.
    */
   if (brw->gen == 6 && surf.image_alignment_el.height > 4) {
      /* This can happen on stencil buffers on Sandy Bridge due to the
       * single-LOD work-around.  It's fairly harmless as long as we don't
       * pass a bogus value into isl_surf_fill_state().
       */
      surf.image_alignment_el = isl_extent3d(4, 2, 1);
   }

   union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };

   const struct isl_surf *aux_surf = NULL;
   uint64_t aux_offset = 0;
   if (surface->mt->mcs_mt) {
      aux_surf = &surface->aux_surf;
      assert(surface->mt->mcs_mt->offset == 0);
      aux_offset = surface->mt->mcs_mt->bo->offset64;

      /* We only really need a clear color if we also have an auxiliary
       * surface.  Without one, it does nothing.
       */
      clear_color = intel_miptree_get_isl_clear_color(brw, surface->mt);
   }

   /* Identity view of the single level/layer with an XYZW channel select. */
   struct isl_view view = {
      .format = surface->brw_surfaceformat,
      .base_level = 0,
      .levels = 1,
      .base_array_layer = 0,
      .array_len = 1,
      .channel_select = {
         ISL_CHANNEL_SELECT_RED,
         ISL_CHANNEL_SELECT_GREEN,
         ISL_CHANNEL_SELECT_BLUE,
         ISL_CHANNEL_SELECT_ALPHA,
      },
      .usage = is_render_target ? ISL_SURF_USAGE_RENDER_TARGET_BIT :
                                  ISL_SURF_USAGE_TEXTURE_BIT,
   };

   uint32_t surf_offset;
   uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                  ss_info.num_dwords * 4, ss_info.ss_align,
                                  &surf_offset);

   const uint32_t mocs = is_render_target ? ss_info.rb_mocs : ss_info.tex_mocs;

   isl_surf_fill_state(&brw->isl_dev, dw, .surf = &surf, .view = &view,
                       .address = surface->mt->bo->offset64 + surface->bo_offset,
                       .aux_surf = aux_surf, .aux_usage = surface->aux_usage,
                       .aux_address = aux_offset,
                       .mocs = mocs, .clear_color = clear_color,
                       .x_offset_sa = surface->tile_x_sa,
                       .y_offset_sa = surface->tile_y_sa);

   /* Emit relocation to surface contents */
   drm_intel_bo_emit_reloc(brw->batch.bo,
                           surf_offset + ss_info.reloc_dw * 4,
                           surface->mt->bo,
                           dw[ss_info.reloc_dw] - surface->mt->bo->offset64,
                           read_domains, write_domain);

   if (aux_surf) {
      /* On gen7 and prior, the bottom 12 bits of the MCS base address are
       * used to store other information.  This should be ok, however, because
       * surface buffer addresses are always 4K page aligned.
       */
      assert((aux_offset & 0xfff) == 0);
      drm_intel_bo_emit_reloc(brw->batch.bo,
                              surf_offset + ss_info.aux_reloc_dw * 4,
                              surface->mt->mcs_mt->bo,
                              dw[ss_info.aux_reloc_dw] & 0xfff,
                              read_domains, write_domain);
   }

   return surf_offset;
}
342
Jason Ekstrand8096ed72016-04-22 13:46:25 -0700343/**
344 * Perform a HiZ or depth resolve operation.
345 *
346 * For an overview of HiZ ops, see the following sections of the Sandy Bridge
347 * PRM, Volume 1, Part 2:
348 * - 7.5.3.1 Depth Buffer Clear
349 * - 7.5.3.2 Depth Buffer Resolve
350 * - 7.5.3.3 Hierarchical Depth Buffer Resolve
351 */
Eric Anholt5b226ad2012-05-21 09:30:35 -0700352void
Kenneth Graunkeca437572013-07-02 23:17:14 -0700353intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
Jason Ekstrandbed74292016-04-22 16:04:05 -0700354 unsigned int level, unsigned int layer, enum gen6_hiz_op op)
Eric Anholt5b226ad2012-05-21 09:30:35 -0700355{
Eric Anholta2ca98b2013-05-30 14:53:55 -0700356 const char *opname = NULL;
357
358 switch (op) {
359 case GEN6_HIZ_OP_DEPTH_RESOLVE:
360 opname = "depth resolve";
361 break;
362 case GEN6_HIZ_OP_HIZ_RESOLVE:
363 opname = "hiz ambiguate";
364 break;
365 case GEN6_HIZ_OP_DEPTH_CLEAR:
366 opname = "depth clear";
367 break;
368 case GEN6_HIZ_OP_NONE:
369 opname = "noop?";
370 break;
371 }
372
373 DBG("%s %s to mt %p level %d layer %d\n",
Marius Predut28d9e902015-04-07 22:05:28 +0300374 __func__, opname, mt, level, layer);
Eric Anholta2ca98b2013-05-30 14:53:55 -0700375
Kenneth Graunke8cad1c12014-02-06 17:06:12 -0800376 if (brw->gen >= 8) {
377 gen8_hiz_exec(brw, mt, level, layer, op);
378 } else {
Jason Ekstrand8096ed72016-04-22 13:46:25 -0700379 gen6_blorp_hiz_exec(brw, mt, level, layer, op);
Kenneth Graunke8cad1c12014-02-06 17:06:12 -0800380 }
Eric Anholt5b226ad2012-05-21 09:30:35 -0700381}
382
Paul Berry2c5510b2012-04-29 22:00:46 -0700383void
Jason Ekstrandb3f08b52016-04-22 14:32:48 -0700384brw_blorp_exec(struct brw_context *brw, const struct brw_blorp_params *params)
Paul Berry2c5510b2012-04-29 22:00:46 -0700385{
Eric Anholt185b5a52013-06-18 13:52:03 -0700386 struct gl_context *ctx = &brw->ctx;
Topi Pohjolainen7de72f72016-03-30 20:41:30 +0300387 const uint32_t estimated_max_batch_usage = brw->gen >= 8 ? 1800 : 1500;
Eric Anholt185b5a52013-06-18 13:52:03 -0700388 bool check_aperture_failed_once = false;
389
390 /* Flush the sampler and render caches. We definitely need to flush the
391 * sampler cache so that we get updated contents from the render cache for
392 * the glBlitFramebuffer() source. Also, we are sometimes warned in the
393 * docs to flush the cache between reinterpretations of the same surface
394 * data with different formats, which blorp does for stencil and depth
395 * data.
396 */
Chris Wilson4b35ab92015-04-30 17:04:51 +0100397 brw_emit_mi_flush(brw);
Eric Anholt185b5a52013-06-18 13:52:03 -0700398
Topi Pohjolainen7644e8a2016-04-15 10:43:05 +0300399 brw_select_pipeline(brw, BRW_RENDER_PIPELINE);
400
Eric Anholt185b5a52013-06-18 13:52:03 -0700401retry:
Kenneth Graunke6bc40f92013-10-28 16:06:10 -0700402 intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
Eric Anholt185b5a52013-06-18 13:52:03 -0700403 intel_batchbuffer_save_state(brw);
404 drm_intel_bo *saved_bo = brw->batch.bo;
Matt Turner131573d2015-07-11 14:36:25 -0700405 uint32_t saved_used = USED_BATCH(brw->batch);
Eric Anholt185b5a52013-06-18 13:52:03 -0700406 uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;
407
Kenneth Graunke53631be2013-07-06 00:36:46 -0700408 switch (brw->gen) {
Paul Berry2c5510b2012-04-29 22:00:46 -0700409 case 6:
Kenneth Graunkeca437572013-07-02 23:17:14 -0700410 gen6_blorp_exec(brw, params);
Paul Berry2c5510b2012-04-29 22:00:46 -0700411 break;
412 case 7:
Kenneth Graunkeca437572013-07-02 23:17:14 -0700413 gen7_blorp_exec(brw, params);
Paul Berry2c5510b2012-04-29 22:00:46 -0700414 break;
Topi Pohjolainen7de72f72016-03-30 20:41:30 +0300415 case 8:
416 case 9:
417 gen8_blorp_exec(brw, params);
418 break;
Paul Berry2c5510b2012-04-29 22:00:46 -0700419 default:
420 /* BLORP is not supported before Gen6. */
Matt Turner3d826722014-06-29 14:54:01 -0700421 unreachable("not reached");
Paul Berry2c5510b2012-04-29 22:00:46 -0700422 }
Eric Anholtdb31bc52013-02-07 18:46:18 -0800423
Eric Anholt185b5a52013-06-18 13:52:03 -0700424 /* Make sure we didn't wrap the batch unintentionally, and make sure we
425 * reserved enough space that a wrap will never happen.
426 */
427 assert(brw->batch.bo == saved_bo);
Matt Turner131573d2015-07-11 14:36:25 -0700428 assert((USED_BATCH(brw->batch) - saved_used) * 4 +
Eric Anholt185b5a52013-06-18 13:52:03 -0700429 (saved_state_batch_offset - brw->batch.state_batch_offset) <
430 estimated_max_batch_usage);
431 /* Shut up compiler warnings on release build */
432 (void)saved_bo;
433 (void)saved_used;
434 (void)saved_state_batch_offset;
435
436 /* Check if the blorp op we just did would make our batch likely to fail to
437 * map all the BOs into the GPU at batch exec time later. If so, flush the
438 * batch and try again with nothing else in the batch.
439 */
440 if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
441 if (!check_aperture_failed_once) {
442 check_aperture_failed_once = true;
443 intel_batchbuffer_reset_to_saved(brw);
444 intel_batchbuffer_flush(brw);
445 goto retry;
446 } else {
447 int ret = intel_batchbuffer_flush(brw);
448 WARN_ONCE(ret == -ENOSPC,
449 "i965: blorp emit exceeded available aperture space\n");
450 }
451 }
452
Kenneth Graunkee3343902013-07-03 13:54:53 -0700453 if (unlikely(brw->always_flush_batch))
Kenneth Graunkeca437572013-07-02 23:17:14 -0700454 intel_batchbuffer_flush(brw);
Eric Anholtdb31bc52013-02-07 18:46:18 -0800455
456 /* We've smashed all state compared to what the normal 3D pipeline
457 * rendering tracks for GL.
458 */
Topi Pohjolainen234b5f22016-04-22 13:43:39 +0300459 brw->ctx.NewDriverState |= BRW_NEW_BLORP;
Eric Anholt3f9440c2014-04-06 10:49:49 -0700460 brw->no_depth_or_stencil = false;
Eric Anholt2e2445f2013-06-18 14:54:18 -0700461 brw->ib.type = -1;
Eric Anholtdb31bc52013-02-07 18:46:18 -0800462
463 /* Flush the sampler cache so any texturing from the destination is
464 * coherent.
465 */
Chris Wilson4b35ab92015-04-30 17:04:51 +0100466 brw_emit_mi_flush(brw);
Paul Berry2c5510b2012-04-29 22:00:46 -0700467}
468
Jason Ekstrand8096ed72016-04-22 13:46:25 -0700469void
470gen6_blorp_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
471 unsigned int level, unsigned int layer, enum gen6_hiz_op op)
Paul Berry2c5510b2012-04-29 22:00:46 -0700472{
Jason Ekstrandb3f08b52016-04-22 14:32:48 -0700473 struct brw_blorp_params params;
474 brw_blorp_params_init(&params);
Paul Berry2c5510b2012-04-29 22:00:46 -0700475
Jason Ekstrand8096ed72016-04-22 13:46:25 -0700476 params.hiz_op = op;
477
Jason Ekstrandb6dd8e42016-04-21 16:39:56 -0700478 brw_blorp_surface_info_init(brw, &params.depth, mt, level, layer,
479 mt->format, true);
Chad Versacea14dc4f2013-03-11 19:21:46 -0700480
481 /* Align the rectangle primitive to 8x4 pixels.
482 *
483 * During fast depth clears, the emitted rectangle primitive must be
484 * aligned to 8x4 pixels. From the Ivybridge PRM, Vol 2 Part 1 Section
485 * 11.5.3.1 Depth Buffer Clear (and the matching section in the Sandybridge
486 * PRM):
487 * If Number of Multisamples is NUMSAMPLES_1, the rectangle must be
488 * aligned to an 8x4 pixel block relative to the upper left corner
489 * of the depth buffer [...]
490 *
491 * For hiz resolves, the rectangle must also be 8x4 aligned. Item
492 * WaHizAmbiguate8x4Aligned from the Haswell workarounds page and the
493 * Ivybridge simulator require the alignment.
494 *
495 * To be safe, let's just align the rect for all hiz operations and all
496 * hardware generations.
497 *
498 * However, for some miptree slices of a Z24 texture, emitting an 8x4
499 * aligned rectangle that covers the slice may clobber adjacent slices if
500 * we strictly adhered to the texture alignments specified in the PRM. The
501 * Ivybridge PRM, Section "Alignment Unit Size", states that
502 * SURFACE_STATE.Surface_Horizontal_Alignment should be 4 for Z24 surfaces,
503 * not 8. But commit 1f112cc increased the alignment from 4 to 8, which
504 * prevents the clobbering.
505 */
Jason Ekstrand28b0ad82016-06-23 11:00:59 -0700506 params.dst.surf.samples = MAX2(mt->num_samples, 1);
507 if (params.depth.surf.samples > 1) {
Jason Ekstrand8096ed72016-04-22 13:46:25 -0700508 params.depth.width = ALIGN(mt->logical_width0, 8);
509 params.depth.height = ALIGN(mt->logical_height0, 4);
Chris Forbes43d23e82014-11-18 21:49:53 +1300510 } else {
Jason Ekstrand8096ed72016-04-22 13:46:25 -0700511 params.depth.width = ALIGN(params.depth.width, 8);
512 params.depth.height = ALIGN(params.depth.height, 4);
Chris Forbes43d23e82014-11-18 21:49:53 +1300513 }
Chad Versacea14dc4f2013-03-11 19:21:46 -0700514
Jason Ekstrand8096ed72016-04-22 13:46:25 -0700515 params.x1 = params.depth.width;
516 params.y1 = params.depth.height;
Paul Berry2c5510b2012-04-29 22:00:46 -0700517
Eric Anholt11bef602014-04-23 14:21:21 -0700518 assert(intel_miptree_level_has_hiz(mt, level));
Paul Berry2c5510b2012-04-29 22:00:46 -0700519
520 switch (mt->format) {
Jason Ekstrand8096ed72016-04-22 13:46:25 -0700521 case MESA_FORMAT_Z_UNORM16:
522 params.depth_format = BRW_DEPTHFORMAT_D16_UNORM;
523 break;
524 case MESA_FORMAT_Z_FLOAT32:
525 params.depth_format = BRW_DEPTHFORMAT_D32_FLOAT;
526 break;
527 case MESA_FORMAT_Z24_UNORM_X8_UINT:
528 params.depth_format = BRW_DEPTHFORMAT_D24_UNORM_X8_UINT;
529 break;
530 default:
531 unreachable("not reached");
Paul Berry2c5510b2012-04-29 22:00:46 -0700532 }
Jason Ekstrand8096ed72016-04-22 13:46:25 -0700533
534 brw_blorp_exec(brw, &params);
Paul Berry2c5510b2012-04-29 22:00:46 -0700535}