/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_generator.cpp
 *
 * This file supports generating code from the FS LIR to the actual
 * native instructions.
 */

#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "brw_program.h"

static enum brw_reg_file
brw_file_from_reg(fs_reg *reg)
{
   switch (reg->file) {
   case ARF:
      return BRW_ARCHITECTURE_REGISTER_FILE;
   case FIXED_GRF:
   case VGRF:
      return BRW_GENERAL_REGISTER_FILE;
   case MRF:
      return BRW_MESSAGE_REGISTER_FILE;
   case IMM:
      return BRW_IMMEDIATE_VALUE;
   case BAD_FILE:
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }
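   /* Not reached: every register file is handled above, and unreachable()
    * aborts in debug builds.  This return is presumably just here to
    * silence missing-return warnings from compilers that can't see that.
    */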
   return BRW_ARCHITECTURE_REGISTER_FILE;
}

static struct brw_reg
brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen, bool compressed)
{
   struct brw_reg brw_reg;

   switch (reg->file) {
   case MRF:
      assert((reg->nr & ~BRW_MRF_COMPR4) < BRW_MAX_MRF(gen));
      /* Fallthrough */
   case VGRF:
      if (reg->stride == 0) {
         brw_reg = brw_vec1_reg(brw_file_from_reg(reg), reg->nr, 0);
      } else {
         /* From the Haswell PRM:
          *
          *  "VertStride must be used to cross GRF register boundaries. This
          *   rule implies that elements within a 'Width' cannot cross GRF
          *   boundaries."
          *
          * The maximum width value that could satisfy this restriction is:
          */
         const unsigned reg_width = REG_SIZE / (reg->stride * type_sz(reg->type));

         /* Because the hardware can only split source regions at a whole
          * multiple of width during decompression (i.e. vertically), clamp
          * the value obtained above to the physical execution size of a
          * single decompressed chunk of the instruction:
          */
         const unsigned phys_width = compressed ? inst->exec_size / 2 :
                                     inst->exec_size;

         /* XXX - The equation above is strictly speaking not correct on
          *       hardware that supports unbalanced GRF writes -- On Gen9+
          *       each decompressed chunk of the instruction may have a
          *       different execution size when the number of components
          *       written to each destination GRF is not the same.
          */
         const unsigned width = MIN2(reg_width, phys_width);
         brw_reg = brw_vecn_reg(width, brw_file_from_reg(reg), reg->nr, 0);
         brw_reg = stride(brw_reg, width * reg->stride, width, reg->stride);
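         /* A worked example, assuming a SIMD8, uncompressed instruction
          * with a float source of stride 2: reg_width = 32 / (2 * 4) = 4
          * and phys_width = 8, so width = 4 and the region ends up as
          * <8;4,2>:F -- each row of four stride-2 elements spans exactly
          * one 32-byte GRF, so no element crosses a register boundary.
          */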
      }

      brw_reg = retype(brw_reg, reg->type);
      brw_reg = byte_offset(brw_reg, reg->subreg_offset);
      brw_reg.abs = reg->abs;
      brw_reg.negate = reg->negate;
      break;
   case ARF:
   case FIXED_GRF:
   case IMM:
      brw_reg = reg->as_brw_reg();
      break;
   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case ATTR:
   case UNIFORM:
      unreachable("not reached");
   }

   return brw_reg;
}

fs_generator::fs_generator(const struct brw_compiler *compiler, void *log_data,
                           void *mem_ctx,
                           const void *key,
                           struct brw_stage_prog_data *prog_data,
                           unsigned promoted_constants,
                           bool runtime_check_aads_emit,
                           gl_shader_stage stage)

   : compiler(compiler), log_data(log_data),
     devinfo(compiler->devinfo), key(key),
     prog_data(prog_data),
     promoted_constants(promoted_constants),
     runtime_check_aads_emit(runtime_check_aads_emit), debug_flag(false),
     stage(stage), mem_ctx(mem_ctx)
{
   p = rzalloc(mem_ctx, struct brw_codegen);
   brw_init_codegen(devinfo, p, mem_ctx);
}

fs_generator::~fs_generator()
{
}

class ip_record : public exec_node {
public:
   DECLARE_RALLOC_CXX_OPERATORS(ip_record)

   ip_record(int ip)
   {
      this->ip = ip;
   }

   int ip;
};

bool
fs_generator::patch_discard_jumps_to_fb_writes()
{
   if (devinfo->gen < 6 || this->discard_halt_patches.is_empty())
      return false;

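   /* Jump distances are encoded in different units on different
    * generations; brw_jump_scale() gives the multiplier that converts an
    * instruction count into JIP/UIP units for this generation.
    */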
   int scale = brw_jump_scale(p->devinfo);

   /* There is a somewhat strange undocumented requirement of using
    * HALT, according to the simulator.  If some channel has HALTed to
    * a particular UIP, then by the end of the program, every channel
    * must have HALTed to that UIP.  Furthermore, the tracking is a
    * stack, so you can't do the final halt of a UIP after starting
    * halting to a new UIP.
    *
    * Symptoms of not emitting this instruction on actual hardware
    * included GPU hangs and sparkly rendering on the piglit discard
    * tests.
    */
   brw_inst *last_halt = gen6_HALT(p);
   brw_inst_set_uip(p->devinfo, last_halt, 1 * scale);
   brw_inst_set_jip(p->devinfo, last_halt, 1 * scale);

   int ip = p->nr_insn;

   foreach_in_list(ip_record, patch_ip, &discard_halt_patches) {
      brw_inst *patch = &p->store[patch_ip->ip];

      assert(brw_inst_opcode(p->devinfo, patch) == BRW_OPCODE_HALT);
      /* HALT takes a half-instruction distance from the pre-incremented IP. */
      brw_inst_set_uip(p->devinfo, patch, (ip - patch_ip->ip) * scale);
   }

   this->discard_halt_patches.make_empty();
   return true;
}

void
fs_generator::fire_fb_write(fs_inst *inst,
                            struct brw_reg payload,
                            struct brw_reg implied_header,
                            GLuint nr)
{
   uint32_t msg_control;

   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;

   if (devinfo->gen < 6) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_MOV(p, offset(payload, 1), brw_vec8_grf(1, 0));
      brw_pop_insn_state(p);
   }

   if (inst->opcode == FS_OPCODE_REP_FB_WRITE)
      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED;
   else if (prog_data->dual_src_blend) {
      if (!inst->force_sechalf)
         msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01;
      else
         msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23;
   } else if (inst->exec_size == 16)
      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
   else
      msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;

   uint32_t surf_index =
      prog_data->binding_table.render_target_start + inst->target;

   bool last_render_target = inst->eot ||
                             (prog_data->dual_src_blend && dispatch_width == 16);

   brw_fb_WRITE(p,
                dispatch_width,
                payload,
                implied_header,
                msg_control,
                surf_index,
                nr,
                0,
                inst->eot,
                last_render_target,
                inst->header_size != 0);

   brw_mark_surface_used(&prog_data->base, surf_index);
}

void
fs_generator::generate_fb_write(fs_inst *inst, struct brw_reg payload)
{
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
   const brw_wm_prog_key * const key = (brw_wm_prog_key * const) this->key;
   struct brw_reg implied_header;

   if (devinfo->gen < 8 && !devinfo->is_haswell) {
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
   }

   if (inst->base_mrf >= 0)
      payload = brw_message_reg(inst->base_mrf);

   /* Header is 2 regs, g0 and g1 are the contents.  g0 is handled by the
    * implied move; here's g1.
    */
   if (inst->header_size != 0) {
      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_set_default_flag_reg(p, 0, 0);

      /* On HSW, the GPU will use the predicate on SENDC, unless the header is
       * present.
       */
      if (prog_data->uses_kill) {
         struct brw_reg pixel_mask;

         if (devinfo->gen >= 6)
            pixel_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
         else
            pixel_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);

         brw_MOV(p, pixel_mask, brw_flag_reg(0, 1));
      }

      if (devinfo->gen >= 6) {
         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_16);
         brw_set_default_compression_control(p, BRW_COMPRESSION_COMPRESSED);
         brw_MOV(p,
                 retype(payload, BRW_REGISTER_TYPE_UD),
                 retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
         brw_pop_insn_state(p);

         if (inst->target > 0 && key->replicate_alpha) {
            /* Set "Source0 Alpha Present to RenderTarget" bit in message
             * header.
             */
            brw_OR(p,
                   vec1(retype(payload, BRW_REGISTER_TYPE_UD)),
                   vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
                   brw_imm_ud(0x1 << 11));
         }

         if (inst->target > 0) {
            /* Set the render target index for choosing BLEND_STATE. */
            brw_MOV(p, retype(vec1(suboffset(payload, 2)),
                              BRW_REGISTER_TYPE_UD),
                    brw_imm_ud(inst->target));
         }

         /* Set the "computed stencil" bit in the message header. */
         if (prog_data->computed_stencil) {
            brw_OR(p,
                   vec1(retype(payload, BRW_REGISTER_TYPE_UD)),
                   vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
                   brw_imm_ud(0x1 << 14));
         }

         implied_header = brw_null_reg();
      } else {
         implied_header = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
      }

      brw_pop_insn_state(p);
   } else {
      implied_header = brw_null_reg();
   }

   if (!runtime_check_aads_emit) {
      fire_fb_write(inst, payload, implied_header, inst->mlen);
   } else {
      /* This can only happen in gen < 6 */
      assert(devinfo->gen < 6);

      struct brw_reg v1_null_ud = vec1(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));

      /* Check runtime bit to detect if we have to send AA data or not */
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_AND(p,
              v1_null_ud,
              retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD),
              brw_imm_ud(1<<26));
      brw_inst_set_cond_modifier(p->devinfo, brw_last_inst, BRW_CONDITIONAL_NZ);

      int jmp = brw_JMPI(p, brw_imm_ud(0), BRW_PREDICATE_NORMAL) - p->store;
      brw_inst_set_exec_size(p->devinfo, brw_last_inst, BRW_EXECUTE_1);
      {
         /* Don't send AA data */
         fire_fb_write(inst, offset(payload, 1), implied_header, inst->mlen-1);
      }
      brw_land_fwd_jump(p, jmp);
      fire_fb_write(inst, payload, implied_header, inst->mlen);
   }
}

void
fs_generator::generate_mov_indirect(fs_inst *inst,
                                    struct brw_reg dst,
                                    struct brw_reg reg,
                                    struct brw_reg indirect_byte_offset)
{
   assert(indirect_byte_offset.type == BRW_REGISTER_TYPE_UD);
   assert(indirect_byte_offset.file == BRW_GENERAL_REGISTER_FILE ||
          indirect_byte_offset.file == BRW_IMMEDIATE_VALUE);

   unsigned imm_byte_offset = reg.nr * REG_SIZE + reg.subnr;

   if (indirect_byte_offset.file == BRW_IMMEDIATE_VALUE) {
      imm_byte_offset += indirect_byte_offset.ud;

      reg.nr = imm_byte_offset / REG_SIZE;
      reg.subnr = imm_byte_offset % REG_SIZE;
      brw_MOV(p, dst, reg);
   } else {
      /* Prior to Broadwell, there are only 8 address registers. */
      assert(inst->exec_size == 8 || devinfo->gen >= 8);

      /* We use VxH indirect addressing, clobbering a0.0 through a0.7. */
      struct brw_reg addr = vec8(brw_address_reg(0));

      /* The destination stride of an instruction (in bytes) must be greater
       * than or equal to the size of the rest of the instruction.  Since the
       * address register is of type UW, we can't use a D-type instruction.
       * In order to get around this, we retype to UW and use a stride.
       */
      indirect_byte_offset =
         retype(spread(indirect_byte_offset, 2), BRW_REGISTER_TYPE_UW);
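      /* spread(..., 2) doubles the horizontal stride, so reading the UD
       * offset as UW with stride 2 walks the low 16 bits of each dword;
       * byte offsets within the GRF file fit in 16 bits, so nothing is
       * lost by dropping the high halves.
       */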

      struct brw_reg ind_src;
      if (devinfo->gen < 8) {
         /* From the Haswell PRM section "Register Region Restrictions":
          *
          *    "The lower bits of the AddressImmediate must not overflow to
          *    change the register address.  The lower 5 bits of Address
          *    Immediate when added to lower 5 bits of address register gives
          *    the sub-register offset. The upper bits of Address Immediate
          *    when added to upper bits of address register gives the register
          *    address. Any overflow from sub-register offset is dropped."
          *
          * This restriction is only listed in the Haswell PRM but empirical
          * testing indicates that it applies on all older generations and is
          * lifted on Broadwell.
          *
          * Since the indirect may cause us to cross a register boundary, this
          * makes the base offset almost useless.  We could try and do
          * something clever where we use an actual base offset if
          * base_offset % 32 == 0 but that would mean we were generating
          * different code depending on the base offset.  Instead, for the
          * sake of consistency, we'll just do the add ourselves.
          */
         brw_ADD(p, addr, indirect_byte_offset, brw_imm_uw(imm_byte_offset));
         ind_src = brw_VxH_indirect(0, 0);
      } else {
         brw_MOV(p, addr, indirect_byte_offset);
         ind_src = brw_VxH_indirect(0, imm_byte_offset);
      }

      brw_inst *mov = brw_MOV(p, dst, retype(ind_src, dst.type));

      if (devinfo->gen == 6 && dst.file == BRW_MESSAGE_REGISTER_FILE &&
          !inst->get_next()->is_tail_sentinel() &&
          ((fs_inst *)inst->get_next())->mlen > 0) {
         /* From the Sandybridge PRM:
          *
          *    "[Errata: DevSNB(SNB)] If MRF register is updated by any
          *    instruction that “indexed/indirect” source AND is followed by a
          *    send, the instruction requires a “Switch”. This is to avoid
          *    race condition where send may dispatch before MRF is updated."
          */
         brw_inst_set_thread_control(devinfo, mov, BRW_THREAD_SWITCH);
      }
   }
}

void
fs_generator::generate_urb_read(fs_inst *inst,
                                struct brw_reg dst,
                                struct brw_reg header)
{
   assert(header.file == BRW_GENERAL_REGISTER_FILE);
   assert(header.type == BRW_REGISTER_TYPE_UD);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   brw_set_src1(p, send, brw_imm_ud(0u));

   brw_inst_set_sfid(p->devinfo, send, BRW_SFID_URB);
   brw_inst_set_urb_opcode(p->devinfo, send, GEN8_URB_OPCODE_SIMD8_READ);

   if (inst->opcode == SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT)
      brw_inst_set_urb_per_slot_offset(p->devinfo, send, true);

   brw_inst_set_mlen(p->devinfo, send, inst->mlen);
   brw_inst_set_rlen(p->devinfo, send, inst->regs_written);
   brw_inst_set_header_present(p->devinfo, send, true);
   brw_inst_set_urb_global_offset(p->devinfo, send, inst->offset);
}

void
fs_generator::generate_urb_write(fs_inst *inst, struct brw_reg payload)
{
   brw_inst *insn;

   insn = brw_next_insn(p, BRW_OPCODE_SEND);

   brw_set_dest(p, insn, brw_null_reg());
   brw_set_src0(p, insn, payload);
   brw_set_src1(p, insn, brw_imm_d(0));

   brw_inst_set_sfid(p->devinfo, insn, BRW_SFID_URB);
   brw_inst_set_urb_opcode(p->devinfo, insn, GEN8_URB_OPCODE_SIMD8_WRITE);

   if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT ||
       inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
      brw_inst_set_urb_per_slot_offset(p->devinfo, insn, true);

   if (inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED ||
       inst->opcode == SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT)
      brw_inst_set_urb_channel_mask_present(p->devinfo, insn, true);

   brw_inst_set_mlen(p->devinfo, insn, inst->mlen);
   brw_inst_set_rlen(p->devinfo, insn, 0);
   brw_inst_set_eot(p->devinfo, insn, inst->eot);
   brw_inst_set_header_present(p->devinfo, insn, true);
   brw_inst_set_urb_global_offset(p->devinfo, insn, inst->offset);
}

void
fs_generator::generate_cs_terminate(fs_inst *inst, struct brw_reg payload)
{
   struct brw_inst *insn;

   insn = brw_next_insn(p, BRW_OPCODE_SEND);

   brw_set_dest(p, insn, retype(brw_null_reg(), BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, insn, payload);
   brw_set_src1(p, insn, brw_imm_d(0));

   /* Terminate a compute shader by sending a message to the thread spawner.
    */
   brw_inst_set_sfid(devinfo, insn, BRW_SFID_THREAD_SPAWNER);
   brw_inst_set_mlen(devinfo, insn, 1);
   brw_inst_set_rlen(devinfo, insn, 0);
   brw_inst_set_eot(devinfo, insn, inst->eot);
   brw_inst_set_header_present(devinfo, insn, false);

   brw_inst_set_ts_opcode(devinfo, insn, 0); /* Dereference resource */
   brw_inst_set_ts_request_type(devinfo, insn, 0); /* Root thread */

   /* Note that even though the thread has a URB resource associated with it,
    * we set the "do not dereference URB" bit, because the URB resource is
    * managed by the fixed-function unit, so it will free it automatically.
    */
   brw_inst_set_ts_resource_select(devinfo, insn, 1); /* Do not dereference URB */

   brw_inst_set_mask_control(devinfo, insn, BRW_MASK_DISABLE);
}

void
fs_generator::generate_barrier(fs_inst *inst, struct brw_reg src)
{
   brw_barrier(p, src);
   brw_WAIT(p);
}

void
fs_generator::generate_linterp(fs_inst *inst,
                               struct brw_reg dst, struct brw_reg *src)
{
   /* PLN reads:
    *                      /   in SIMD16   \
    *    -----------------------------------
    *   | src1+0 | src1+1 | src1+2 | src1+3 |
    *   |-----------------------------------|
    *   |(x0, x1)|(y0, y1)|(x2, x3)|(y2, y3)|
    *    -----------------------------------
    *
    * but for the LINE/MAC pair, the LINE reads Xs and the MAC reads Ys:
    *
    *    -----------------------------------
    *   | src1+0 | src1+1 | src1+2 | src1+3 |
    *   |-----------------------------------|
    *   |(x0, x1)|(y0, y1)|        |        | in SIMD8
    *   |-----------------------------------|
    *   |(x0, x1)|(x2, x3)|(y0, y1)|(y2, y3)| in SIMD16
    *    -----------------------------------
    *
    * See also: emit_interpolation_setup_gen4().
    */
   struct brw_reg delta_x = src[0];
   struct brw_reg delta_y = offset(src[0], dispatch_width / 8);
   struct brw_reg interp = src[1];

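   /* PLN reads the deltas as an aligned two-GRF pair.  Before Gen7 that
    * pair apparently must start on an even register number, which is what
    * the (delta_x.nr & 1) check below enforces; otherwise we fall back to
    * the equivalent LINE+MAC sequence.
    */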
   if (devinfo->has_pln &&
       (devinfo->gen >= 7 || (delta_x.nr & 1) == 0)) {
      brw_PLN(p, dst, interp, delta_x);
   } else {
      brw_LINE(p, brw_null_reg(), interp, delta_x);
      brw_MAC(p, dst, suboffset(interp, 1), delta_y);
   }
}

void
fs_generator::generate_get_buffer_size(fs_inst *inst,
                                       struct brw_reg dst,
                                       struct brw_reg src,
                                       struct brw_reg surf_index)
{
   assert(devinfo->gen >= 7);
   assert(surf_index.file == BRW_IMMEDIATE_VALUE);

   uint32_t simd_mode;
   int rlen = 4;

   switch (inst->exec_size) {
   case 8:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      break;
   case 16:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      break;
   default:
      unreachable("Invalid width for texture instruction");
   }

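   /* The resinfo message returns four channels; in SIMD16 each channel
    * takes two GRFs instead of one, so the response length doubles from
    * 4 to 8 and the destination is treated as a 16-wide register.
    */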
   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
      rlen = 8;
      dst = vec16(dst);
   }

   brw_SAMPLE(p,
              retype(dst, BRW_REGISTER_TYPE_UW),
              inst->base_mrf,
              src,
              surf_index.ud,
              0,
              GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO,
              rlen, /* response length */
              inst->mlen,
              inst->header_size > 0,
              simd_mode,
              BRW_SAMPLER_RETURN_FORMAT_SINT32);

   brw_mark_surface_used(prog_data, surf_index.ud);
}

void
fs_generator::generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src,
                           struct brw_reg surface_index,
                           struct brw_reg sampler_index)
{
   int msg_type = -1;
   uint32_t simd_mode;
   uint32_t return_format;
   bool is_combined_send = inst->eot;

   switch (dst.type) {
   case BRW_REGISTER_TYPE_D:
      return_format = BRW_SAMPLER_RETURN_FORMAT_SINT32;
      break;
   case BRW_REGISTER_TYPE_UD:
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;
      break;
   default:
      return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
      break;
   }

   /* Stomp the resinfo output type to UINT32.  On gens 4-5, the output type
    * is set as part of the message descriptor.  On gen4, the PRM seems to
    * allow UINT32 and FLOAT32 (i965 PRM, Vol. 4 Section 4.8.1.1), but on
    * later gens UINT32 is required.  Once you hit Sandy Bridge, the bit is
    * gone from the message descriptor entirely and you just get UINT32 all
    * the time regardless.  Since we can really only do non-UINT32 on gen4,
    * just stomp it to UINT32 all the time.
    */
   if (inst->opcode == SHADER_OPCODE_TXS)
      return_format = BRW_SAMPLER_RETURN_FORMAT_UINT32;

   switch (inst->exec_size) {
   case 8:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      break;
   case 16:
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      break;
   default:
      unreachable("Invalid width for texture instruction");
   }

   if (devinfo->gen >= 5) {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE;
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS;
         }
         break;
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LOD;
         }
         break;
      case SHADER_OPCODE_TXL_LZ:
         assert(devinfo->gen >= 9);
         if (inst->shadow_compare) {
            msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_C_LZ;
         } else {
            msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LZ;
         }
         break;
      case SHADER_OPCODE_TXS:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO;
         break;
      case SHADER_OPCODE_TXD:
         if (inst->shadow_compare) {
            /* Gen7.5+.  Otherwise, lowered by brw_lower_texture_gradients(). */
            assert(devinfo->gen >= 8 || devinfo->is_haswell);
            msg_type = HSW_SAMPLER_MESSAGE_SAMPLE_DERIV_COMPARE;
         } else {
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS;
         }
         break;
      case SHADER_OPCODE_TXF:
         msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_LZ:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD_LZ;
         break;
      case SHADER_OPCODE_TXF_CMS_W:
         assert(devinfo->gen >= 9);
         msg_type = GEN9_SAMPLER_MESSAGE_SAMPLE_LD2DMS_W;
         break;
      case SHADER_OPCODE_TXF_CMS:
         if (devinfo->gen >= 7)
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DMS;
         else
            msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
         break;
      case SHADER_OPCODE_TXF_UMS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD2DSS;
         break;
      case SHADER_OPCODE_TXF_MCS:
         assert(devinfo->gen >= 7);
         msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_LD_MCS;
         break;
      case SHADER_OPCODE_LOD:
         msg_type = GEN5_SAMPLER_MESSAGE_LOD;
         break;
      case SHADER_OPCODE_TG4:
         if (inst->shadow_compare) {
            assert(devinfo->gen >= 7);
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_C;
         } else {
            assert(devinfo->gen >= 6);
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4;
         }
         break;
      case SHADER_OPCODE_TG4_OFFSET:
         assert(devinfo->gen >= 7);
         if (inst->shadow_compare) {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO_C;
         } else {
            msg_type = GEN7_SAMPLER_MESSAGE_SAMPLE_GATHER4_PO;
         }
         break;
      case SHADER_OPCODE_SAMPLEINFO:
         msg_type = GEN6_SAMPLER_MESSAGE_SAMPLE_SAMPLEINFO;
         break;
      default:
         unreachable("not reached");
      }
   } else {
      switch (inst->opcode) {
      case SHADER_OPCODE_TEX:
         /* Note that G45 and older determine shadow compare and dispatch
          * width from message length for most messages.
          */
         if (inst->exec_size == 8) {
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE;
            if (inst->shadow_compare) {
               assert(inst->mlen == 6);
            } else {
               assert(inst->mlen <= 4);
            }
         } else {
            if (inst->shadow_compare) {
               msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE;
               assert(inst->mlen == 9);
            } else {
               msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE;
               assert(inst->mlen <= 7 && inst->mlen % 2 == 1);
            }
         }
         break;
      case FS_OPCODE_TXB:
         if (inst->shadow_compare) {
            assert(inst->exec_size == 8);
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case SHADER_OPCODE_TXL:
         if (inst->shadow_compare) {
            assert(inst->exec_size == 8);
            assert(inst->mlen == 6);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE;
         } else {
            assert(inst->mlen == 9);
            msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD;
            simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         }
         break;
      case SHADER_OPCODE_TXD:
         /* There is no sample_d_c message; comparisons are done manually */
         assert(inst->exec_size == 8);
         assert(inst->mlen == 7 || inst->mlen == 10);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS;
         break;
      case SHADER_OPCODE_TXF:
         assert(inst->mlen <= 9 && inst->mlen % 2 == 1);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
         simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         break;
      case SHADER_OPCODE_TXS:
         assert(inst->mlen == 3);
         msg_type = BRW_SAMPLER_MESSAGE_SIMD16_RESINFO;
         simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
         break;
      default:
         unreachable("not reached");
      }
   }
   assert(msg_type != -1);

   if (simd_mode == BRW_SAMPLER_SIMD_MODE_SIMD16) {
      dst = vec16(dst);
   }

   assert(devinfo->gen < 7 || inst->header_size == 0 ||
          src.file == BRW_GENERAL_REGISTER_FILE);

   assert(sampler_index.type == BRW_REGISTER_TYPE_UD);

   /* Load the message header if present.  If there's a texture offset,
    * we need to set it up explicitly and load the offset bitfield.
    * Otherwise, we can use an implied move from g0 to the first message reg.
    */
   if (inst->header_size != 0) {
      if (devinfo->gen < 6 && !inst->offset) {
         /* Set up an implied move from g0 to the MRF. */
         src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
      } else {
         struct brw_reg header_reg;

         if (devinfo->gen >= 7) {
            header_reg = src;
         } else {
            assert(inst->base_mrf != -1);
            header_reg = brw_message_reg(inst->base_mrf);
         }

         brw_push_insn_state(p);
         brw_set_default_exec_size(p, BRW_EXECUTE_8);
         brw_set_default_mask_control(p, BRW_MASK_DISABLE);
         brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
         /* Explicitly set up the message header by copying g0 to the MRF. */
         brw_MOV(p, header_reg, brw_vec8_grf(0, 0));

         if (inst->offset) {
            /* Set the offset bits in DWord 2. */
            brw_MOV(p, get_element_ud(header_reg, 2),
                    brw_imm_ud(inst->offset));
         } else if (stage != MESA_SHADER_VERTEX &&
                    stage != MESA_SHADER_FRAGMENT) {
            /* The vertex and fragment stages have g0.2 set to 0, so
             * header0.2 is 0 when g0 is copied.  Other stages may not, so we
             * must set it to 0 to avoid setting undesirable bits in the
             * message.
             */
            brw_MOV(p, get_element_ud(header_reg, 2), brw_imm_ud(0));
         }

         brw_adjust_sampler_state_pointer(p, header_reg, sampler_index);
         brw_pop_insn_state(p);
      }
   }

   uint32_t base_binding_table_index = (inst->opcode == SHADER_OPCODE_TG4 ||
                                        inst->opcode == SHADER_OPCODE_TG4_OFFSET)
                                       ? prog_data->binding_table.gather_texture_start
                                       : prog_data->binding_table.texture_start;

   if (surface_index.file == BRW_IMMEDIATE_VALUE &&
       sampler_index.file == BRW_IMMEDIATE_VALUE) {
      uint32_t surface = surface_index.ud;
      uint32_t sampler = sampler_index.ud;

      brw_SAMPLE(p,
                 retype(dst, BRW_REGISTER_TYPE_UW),
                 inst->base_mrf,
                 src,
                 surface + base_binding_table_index,
                 sampler % 16,
                 msg_type,
                 inst->regs_written,
                 inst->mlen,
                 inst->header_size != 0,
                 simd_mode,
                 return_format);

      brw_mark_surface_used(prog_data, surface + base_binding_table_index);
   } else {
      /* Non-const sampler index */

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));
      struct brw_reg surface_reg = vec1(retype(surface_index, BRW_REGISTER_TYPE_UD));
      struct brw_reg sampler_reg = vec1(retype(sampler_index, BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

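      /* Pack the surface and sampler indices into a single descriptor word
       * in a0.0, surface in bits 0-7 and sampler in bits 8-11.  When both
       * indices live in the same register, a single MUL by 0x101 does the
       * packing, since x * 0x101 == x | (x << 8) for x < 256.
       */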
      if (brw_regs_equal(&surface_reg, &sampler_reg)) {
         brw_MUL(p, addr, sampler_reg, brw_imm_uw(0x101));
      } else {
         brw_SHL(p, addr, sampler_reg, brw_imm_ud(8));
         brw_OR(p, addr, addr, surface_reg);
      }
      if (base_binding_table_index)
         brw_ADD(p, addr, addr, brw_imm_ud(base_binding_table_index));
      brw_AND(p, addr, addr, brw_imm_ud(0xfff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              msg_type,
                              inst->regs_written,
                              inst->mlen /* mlen */,
                              inst->header_size != 0 /* header */,
                              simd_mode,
                              return_format);

      /* visitor knows more than we do about the surface limit required,
       * so has already done marking.
       */
   }

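   /* A combined sample + framebuffer write (inst->eot set on a texture
    * instruction): retroactively turn the message just emitted into a
    * SENDC with EOT so that it also terminates the thread.
    */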
   if (is_combined_send) {
      brw_inst_set_eot(p->devinfo, brw_last_inst, true);
      brw_inst_set_opcode(p->devinfo, brw_last_inst, BRW_OPCODE_SENDC);
   }
}


/* For OPCODE_DDX and OPCODE_DDY, per channel of output we've got input
 * looking like:
 *
 * arg0: ss0.tl ss0.tr ss0.bl ss0.br ss1.tl ss1.tr ss1.bl ss1.br
 *
 * Ideally, we want to produce:
 *
 *            DDX                    DDY
 * dst: (ss0.tr - ss0.tl)     (ss0.tl - ss0.bl)
 *      (ss0.tr - ss0.tl)     (ss0.tr - ss0.br)
 *      (ss0.br - ss0.bl)     (ss0.tl - ss0.bl)
 *      (ss0.br - ss0.bl)     (ss0.tr - ss0.br)
 *      (ss1.tr - ss1.tl)     (ss1.tl - ss1.bl)
 *      (ss1.tr - ss1.tl)     (ss1.tr - ss1.br)
 *      (ss1.br - ss1.bl)     (ss1.tl - ss1.bl)
 *      (ss1.br - ss1.bl)     (ss1.tr - ss1.br)
 *
 * and add another set of two more subspans if in 16-pixel dispatch mode.
 *
 * For DDX, it ends up being easy: width = 2, horiz=0 gets us the same result
 * for each pair, and vertstride = 2 jumps us 2 elements after processing a
 * pair.  But the ideal approximation may impose a huge performance cost on
 * sample_d.  On at least Haswell, the sample_d instruction does some
 * optimizations if the same LOD is used for all pixels in the subspan.
 *
 * For DDY, we need to use ALIGN16 mode since it's capable of doing the
 * appropriate swizzling.
 */
void
fs_generator::generate_ddx(enum opcode opcode,
                           struct brw_reg dst, struct brw_reg src)
{
   unsigned vstride, width;

   if (opcode == FS_OPCODE_DDX_FINE) {
      /* produce accurate derivatives */
      vstride = BRW_VERTICAL_STRIDE_2;
      width = BRW_WIDTH_2;
   } else {
      /* replicate the derivative at the top-left pixel to other pixels */
      vstride = BRW_VERTICAL_STRIDE_4;
      width = BRW_WIDTH_4;
   }

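   /* src0 starts at element 1 (the right pixel of each pair) and src1 at
    * element 0 (the left pixel); with horizontal stride 0 every channel in
    * a width group sees the same right-minus-left difference.
    */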
   struct brw_reg src0 = brw_reg(src.file, src.nr, 1,
                                 src.negate, src.abs,
                                 BRW_REGISTER_TYPE_F,
                                 vstride,
                                 width,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
                                 src.negate, src.abs,
                                 BRW_REGISTER_TYPE_F,
                                 vstride,
                                 width,
                                 BRW_HORIZONTAL_STRIDE_0,
                                 BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
   brw_ADD(p, dst, src0, negate(src1));
}

/* The negation in the Y derivative computation comes from FBOs placing the
 * origin at the upper left instead of the lower left.
 */
void
fs_generator::generate_ddy(enum opcode opcode,
                           struct brw_reg dst, struct brw_reg src)
{
   if (opcode == FS_OPCODE_DDY_FINE) {
      /* produce accurate derivatives */
      struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
                                    src.negate, src.abs,
                                    BRW_REGISTER_TYPE_F,
                                    BRW_VERTICAL_STRIDE_4,
                                    BRW_WIDTH_4,
                                    BRW_HORIZONTAL_STRIDE_1,
                                    BRW_SWIZZLE_XYXY, WRITEMASK_XYZW);
      struct brw_reg src1 = brw_reg(src.file, src.nr, 0,
                                    src.negate, src.abs,
                                    BRW_REGISTER_TYPE_F,
                                    BRW_VERTICAL_STRIDE_4,
                                    BRW_WIDTH_4,
                                    BRW_HORIZONTAL_STRIDE_1,
                                    BRW_SWIZZLE_ZWZW, WRITEMASK_XYZW);
      brw_push_insn_state(p);
      brw_set_default_access_mode(p, BRW_ALIGN_16);
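      /* In ALIGN16, the XYXY swizzle replicates the two top pixels of each
       * subspan and ZWZW the two bottom ones, so each channel computes
       * bottom minus top.
       */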
      brw_ADD(p, dst, negate(src0), src1);
      brw_pop_insn_state(p);
   } else {
      /* replicate the derivative at the top-left pixel to other pixels */
      struct brw_reg src0 = brw_reg(src.file, src.nr, 0,
                                    src.negate, src.abs,
                                    BRW_REGISTER_TYPE_F,
                                    BRW_VERTICAL_STRIDE_4,
                                    BRW_WIDTH_4,
                                    BRW_HORIZONTAL_STRIDE_0,
                                    BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
      struct brw_reg src1 = brw_reg(src.file, src.nr, 2,
                                    src.negate, src.abs,
                                    BRW_REGISTER_TYPE_F,
                                    BRW_VERTICAL_STRIDE_4,
                                    BRW_WIDTH_4,
                                    BRW_HORIZONTAL_STRIDE_0,
                                    BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
      brw_ADD(p, dst, negate(src0), src1);
   }
}

void
fs_generator::generate_discard_jump(fs_inst *inst)
{
   assert(devinfo->gen >= 6);

   /* This HALT will be patched up at FB write time to point UIP at the end of
    * the program, and at brw_uip_jip() JIP will be set to the end of the
    * current block (or the program).
    */
   this->discard_halt_patches.push_tail(new(mem_ctx) ip_record(p->nr_insn));

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   gen6_HALT(p);
   brw_pop_insn_state(p);
}

void
fs_generator::generate_scratch_write(fs_inst *inst, struct brw_reg src)
{
   assert(inst->mlen != 0);

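   /* brw_oword_block_write_scratch() builds the message header in base_mrf;
    * the data to be written is staged in the following MRF here.
    */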
   brw_MOV(p,
           brw_uvec_mrf(inst->exec_size, (inst->base_mrf + 1), 0),
           retype(src, BRW_REGISTER_TYPE_UD));
   brw_oword_block_write_scratch(p, brw_message_reg(inst->base_mrf),
                                 inst->exec_size / 8, inst->offset);
}

void
fs_generator::generate_scratch_read(fs_inst *inst, struct brw_reg dst)
{
   assert(inst->mlen != 0);

   brw_oword_block_read_scratch(p, dst, brw_message_reg(inst->base_mrf),
                                inst->exec_size / 8, inst->offset);
}

void
fs_generator::generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst)
{
   gen7_block_read_scratch(p, dst, inst->exec_size / 8, inst->offset);
}

void
fs_generator::generate_uniform_pull_constant_load(fs_inst *inst,
                                                  struct brw_reg dst,
                                                  struct brw_reg index,
                                                  struct brw_reg offset)
{
   assert(inst->mlen != 0);

   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   assert(offset.file == BRW_IMMEDIATE_VALUE &&
          offset.type == BRW_REGISTER_TYPE_UD);
   uint32_t read_offset = offset.ud;

   brw_oword_block_read(p, dst, brw_message_reg(inst->base_mrf),
                        read_offset, surf_index);
}

Eric Anholtd8214e42012-11-07 11:18:34 -08001109void
Eric Anholt461a2972012-12-05 00:06:30 -08001110fs_generator::generate_uniform_pull_constant_load_gen7(fs_inst *inst,
1111 struct brw_reg dst,
1112 struct brw_reg index,
1113 struct brw_reg offset)
1114{
Chris Forbes3fd359b2014-08-02 14:27:21 +12001115 assert(index.type == BRW_REGISTER_TYPE_UD);
Eric Anholt461a2972012-12-05 00:06:30 -08001116
1117 assert(offset.file == BRW_GENERAL_REGISTER_FILE);
Eric Anholt4c1fdae2013-03-06 14:47:22 -08001118 /* Reference just the dword we need, to avoid angering validate_reg(). */
1119 offset = brw_vec1_grf(offset.nr, 0);
Eric Anholt461a2972012-12-05 00:06:30 -08001120
Eric Anholt4c1fdae2013-03-06 14:47:22 -08001121 /* We use the SIMD4x2 mode because we want to end up with 4 components in
1122 * the destination loaded consecutively from the same offset (which appears
1123 * in the first component, and the rest are ignored).
1124 */
1125 dst.width = BRW_WIDTH_4;
Kenneth Graunke6d89bc82013-08-14 19:49:33 -07001126
Kristian Høgsberg0ac4c272014-12-10 14:59:26 -08001127 struct brw_reg src = offset;
1128 bool header_present = false;
Kristian Høgsberg0ac4c272014-12-10 14:59:26 -08001129
Jason Ekstrand5bda1ff2015-04-14 17:45:40 -07001130 if (devinfo->gen >= 9) {
Kristian Høgsberg0ac4c272014-12-10 14:59:26 -08001131 /* Skylake requires a message header in order to use SIMD4x2 mode. */
      src = retype(brw_vec4_grf(offset.nr, 0), BRW_REGISTER_TYPE_UD);
      header_present = true;

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_MOV(p, vec8(src), retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      brw_MOV(p, get_element_ud(src, 2),
              brw_imm_ud(GEN9_SAMPLER_SIMD_MODE_EXTENSION_SIMD4X2));
      brw_pop_insn_state(p);
   }

   if (index.file == BRW_IMMEDIATE_VALUE) {

      uint32_t surf_index = index.ud;

      brw_push_insn_state(p);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_inst_set_exec_size(devinfo, send, BRW_EXECUTE_4);
      brw_pop_insn_state(p);

      brw_set_dest(p, send, dst);
      brw_set_src0(p, send, src);
      brw_set_sampler_message(p, send,
                              surf_index,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              inst->mlen,
                              header_present,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);
   } else {

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));
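
      /* Only the low byte matters: the binding table index occupies bits
       * 7:0 of the sampler message descriptor, and the indirect send below
       * ORs a0.0 into the descriptor, so we mask the dynamic index down to
       * that field first.
       */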

      /* dst = send(payload, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, dst, src, addr);
      brw_set_sampler_message(p, insn,
                              0,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              1, /* rlen */
                              inst->mlen,
                              header_present,
                              BRW_SAMPLER_SIMD_MODE_SIMD4X2,
                              0);

      brw_pop_insn_state(p);
   }
}

void
fs_generator::generate_varying_pull_constant_load_gen4(fs_inst *inst,
                                                       struct brw_reg dst,
                                                       struct brw_reg index)
{
   assert(devinfo->gen < 7); /* Should use the gen7 variant. */
   assert(inst->header_size != 0);
   assert(inst->mlen);

   assert(index.file == BRW_IMMEDIATE_VALUE &&
          index.type == BRW_REGISTER_TYPE_UD);
   uint32_t surf_index = index.ud;

   uint32_t simd_mode, rlen, msg_type;
   if (dispatch_width == 16) {
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
      rlen = 8;
   } else {
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
      rlen = 4;
   }
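
   /* Worked out: the LD return payload holds one dword per channel for each
    * of the four components, so SIMD16 needs 16 * 4B = 2 GRFs per component
    * (rlen 8) while SIMD8 needs one GRF per component (rlen 4).
    */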

   if (devinfo->gen >= 5)
      msg_type = GEN5_SAMPLER_MESSAGE_SAMPLE_LD;
   else {
      /* We always use the SIMD16 message so that we only have to load U, and
       * not V or R.
       */
      msg_type = BRW_SAMPLER_MESSAGE_SIMD16_LD;
      assert(inst->mlen == 3);
      assert(inst->regs_written == 8);
      rlen = 8;
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
   }
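
   /* The mlen == 3 assertion above matches a message of one header GRF plus
    * two GRFs holding the sixteen dword U coordinates (16 * 4B = 2 GRFs).
    */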

   struct brw_reg header = brw_vec8_grf(0, 0);
   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_inst_set_compression(devinfo, send, false);
   brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
   brw_set_src0(p, send, header);
   if (devinfo->gen < 6)
      brw_inst_set_base_mrf(p->devinfo, send, inst->base_mrf);

   /* Our surface is set up as floats, regardless of what actual data is
    * stored in it.
    */
   uint32_t return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
   brw_set_sampler_message(p, send,
                           surf_index,
                           0, /* sampler (unused) */
                           msg_type,
                           rlen,
                           inst->mlen,
                           inst->header_size != 0,
                           simd_mode,
                           return_format);
}

void
fs_generator::generate_varying_pull_constant_load_gen7(fs_inst *inst,
                                                       struct brw_reg dst,
                                                       struct brw_reg index,
                                                       struct brw_reg offset)
{
   assert(devinfo->gen >= 7);
   /* Varying-offset pull constant loads are treated as a normal expression on
    * gen7, so the fact that it's a send message is hidden at the IR level.
    */
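   /* In other words (a sketch of the assumed semantics): each channel
    * supplies its own element index in the offset register, and the ld
    * message returns surface[offset[ch]] per channel, just like any other
    * per-channel ALU result.
    */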
   assert(inst->header_size == 0);
   assert(!inst->mlen);
   assert(index.type == BRW_REGISTER_TYPE_UD);

   uint32_t simd_mode, rlen, mlen;
   if (dispatch_width == 16) {
      mlen = 2;
      rlen = 8;
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD16;
   } else {
      mlen = 1;
      rlen = 4;
      simd_mode = BRW_SAMPLER_SIMD_MODE_SIMD8;
   }

   if (index.file == BRW_IMMEDIATE_VALUE) {

      uint32_t surf_index = index.ud;

      brw_inst *send = brw_next_insn(p, BRW_OPCODE_SEND);
      brw_set_dest(p, send, retype(dst, BRW_REGISTER_TYPE_UW));
      brw_set_src0(p, send, offset);
      brw_set_sampler_message(p, send,
                              surf_index,
                              0, /* LD message ignores sampler unit */
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              rlen,
                              mlen,
                              false, /* no header */
                              simd_mode,
                              0);

   } else {

      struct brw_reg addr = vec1(retype(brw_address_reg(0), BRW_REGISTER_TYPE_UD));

      brw_push_insn_state(p);
      brw_set_default_mask_control(p, BRW_MASK_DISABLE);
      brw_set_default_access_mode(p, BRW_ALIGN_1);

      /* a0.0 = surf_index & 0xff */
      brw_inst *insn_and = brw_next_insn(p, BRW_OPCODE_AND);
      brw_inst_set_exec_size(p->devinfo, insn_and, BRW_EXECUTE_1);
      brw_set_dest(p, insn_and, addr);
      brw_set_src0(p, insn_and, vec1(retype(index, BRW_REGISTER_TYPE_UD)));
      brw_set_src1(p, insn_and, brw_imm_ud(0x0ff));

      brw_pop_insn_state(p);

      /* dst = send(offset, a0.0 | <descriptor>) */
      brw_inst *insn = brw_send_indirect_message(
         p, BRW_SFID_SAMPLER, retype(dst, BRW_REGISTER_TYPE_UW),
         offset, addr);
      brw_set_sampler_message(p, insn,
                              0 /* surface */,
                              0 /* sampler */,
                              GEN5_SAMPLER_MESSAGE_SAMPLE_LD,
                              rlen /* rlen */,
                              mlen /* mlen */,
                              false /* header */,
                              simd_mode,
                              0);
   }
}

/**
 * Cause the current pixel/sample mask to be transferred into the flags
 * register (f0.0).
 *
 * On Gen6 and above the mask is read from R1.7 bits 15:0; on earlier
 * generations it is taken from R0.0 instead.
 */
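/* Roughly, the instruction emitted here on Gen6+ looks like:
 *
 *    mov (1) f0.0<1>:uw g1.7<0,1,0>:uw { WE_all }
 *
 * with the mask disabled so that the flag write itself doesn't depend on
 * the very execution mask it is loading.
 */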
void
fs_generator::generate_mov_dispatch_to_flags(fs_inst *inst)
{
   struct brw_reg flags = brw_flag_reg(0, inst->flag_subreg);
   struct brw_reg dispatch_mask;

   if (devinfo->gen >= 6)
      dispatch_mask = retype(brw_vec1_grf(1, 7), BRW_REGISTER_TYPE_UW);
   else
      dispatch_mask = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UW);

   brw_push_insn_state(p);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, flags, dispatch_mask);
   brw_pop_insn_state(p);
}

void
fs_generator::generate_pixel_interpolator_query(fs_inst *inst,
                                                struct brw_reg dst,
                                                struct brw_reg src,
                                                struct brw_reg msg_data,
                                                unsigned msg_type)
{
   assert(msg_data.type == BRW_REGISTER_TYPE_UD);

   brw_pixel_interpolator_query(p,
                                retype(dst, BRW_REGISTER_TYPE_UW),
                                src,
                                inst->pi_noperspective,
                                msg_type,
                                msg_data,
                                inst->mlen,
                                inst->regs_written);
}


/**
 * Sets the first word of a vgrf for gen7+ simd4x2 uniform pull constant
 * sampler LD messages.
 *
 * We don't want to bake it into the send message's code generation because
 * that means we don't get a chance to schedule the instructions.
 */
void
fs_generator::generate_set_simd4x2_offset(fs_inst *inst,
                                          struct brw_reg dst,
                                          struct brw_reg value)
{
   assert(value.file == BRW_IMMEDIATE_VALUE);

   brw_push_insn_state(p);
   brw_set_default_exec_size(p, BRW_EXECUTE_8);
   brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
   brw_set_default_mask_control(p, BRW_MASK_DISABLE);
   brw_MOV(p, retype(brw_vec1_reg(dst.file, dst.nr, 0), value.type), value);
   brw_pop_insn_state(p);
}

/* Applies a <1,4,0> region (vstride=1, width=4, hstride=0) to src1 for the
 * ADD instructions emitted below.
 */
void
fs_generator::generate_set_sample_id(fs_inst *inst,
                                     struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1)
{
   assert(dst.type == BRW_REGISTER_TYPE_D ||
          dst.type == BRW_REGISTER_TYPE_UD);
   assert(src0.type == BRW_REGISTER_TYPE_D ||
          src0.type == BRW_REGISTER_TYPE_UD);

   struct brw_reg reg = stride(src1, 1, 4, 0);
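   /* With the <1,4,0> region each group of four channels reads the same
    * element of src1 and the element index advances by one per group, e.g.
    * for one 8-wide ADD:
    *
    *    dst[0..3] = src0[0..3] + src1[0]
    *    dst[4..7] = src0[4..7] + src1[1]
    *
    * This is also why the second half of the SIMD16 case below starts at
    * suboffset(reg, 2): channels 8..15 consume elements 2 and 3.
    */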
   if (devinfo->gen >= 8 || dispatch_width == 8) {
      brw_ADD(p, dst, src0, reg);
   } else if (dispatch_width == 16) {
      brw_push_insn_state(p);
      brw_set_default_exec_size(p, BRW_EXECUTE_8);
      brw_set_default_compression_control(p, BRW_COMPRESSION_NONE);
      brw_ADD(p, firsthalf(dst), firsthalf(src0), reg);
      brw_set_default_compression_control(p, BRW_COMPRESSION_2NDHALF);
      brw_ADD(p, sechalf(dst), sechalf(src0), suboffset(reg, 2));
      brw_pop_insn_state(p);
   }
}

void
fs_generator::generate_pack_half_2x16_split(fs_inst *inst,
                                            struct brw_reg dst,
                                            struct brw_reg x,
                                            struct brw_reg y)
{
   assert(devinfo->gen >= 7);
   assert(dst.type == BRW_REGISTER_TYPE_UD);
   assert(x.type == BRW_REGISTER_TYPE_F);
   assert(y.type == BRW_REGISTER_TYPE_F);

   /* From the Ivybridge PRM, Vol4, Part3, Section 6.27 f32to16:
    *
    *   Because this instruction does not have a 16-bit floating-point type,
    *   the destination data type must be Word (W).
    *
    *   The destination must be DWord-aligned and specify a horizontal stride
    *   (HorzStride) of 2. The 16-bit result is stored in the lower word of
    *   each destination channel and the upper word is not modified.
    */
   struct brw_reg dst_w = spread(retype(dst, BRW_REGISTER_TYPE_W), 2);

   /* Give each 32-bit channel of dst the form below, where "." means
    * unchanged.
    *   0x....hhhh
    */
   brw_F32TO16(p, dst_w, y);

   /* Now the form:
    *   0xhhhh0000
    */
   brw_SHL(p, dst, dst, brw_imm_ud(16u));

   /* And, finally the form of packHalf2x16's output:
    *   0xhhhhllll
    */
   brw_F32TO16(p, dst_w, x);
}

void
fs_generator::generate_unpack_half_2x16_split(fs_inst *inst,
                                              struct brw_reg dst,
                                              struct brw_reg src)
{
   assert(devinfo->gen >= 7);
   assert(dst.type == BRW_REGISTER_TYPE_F);
   assert(src.type == BRW_REGISTER_TYPE_UD);

   /* From the Ivybridge PRM, Vol4, Part3, Section 6.26 f16to32:
    *
    *   Because this instruction does not have a 16-bit floating-point type,
    *   the source data type must be Word (W). The destination type must be
    *   F (Float).
    */
   struct brw_reg src_w = spread(retype(src, BRW_REGISTER_TYPE_W), 2);
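
   /* spread(..., 2) gives a W-typed view with a stride of two words, i.e.
    * the low word of every dword; bumping subnr by 2 bytes below shifts
    * that view to the high word instead.
    */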

   /* Each channel of src has the form of unpackHalf2x16's input: 0xhhhhllll.
    * For the Y case, we wish to access only the upper word; therefore
    * a 16-bit subregister offset is needed.
    */
   assert(inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X ||
          inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y);
   if (inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y)
      src_w.subnr += 2;

   brw_F16TO32(p, dst, src_w);
}

void
fs_generator::generate_shader_time_add(fs_inst *inst,
                                       struct brw_reg payload,
                                       struct brw_reg offset,
                                       struct brw_reg value)
{
   assert(devinfo->gen >= 7);
   brw_push_insn_state(p);
   brw_set_default_mask_control(p, true);

   assert(payload.file == BRW_GENERAL_REGISTER_FILE);
   struct brw_reg payload_offset = retype(brw_vec1_grf(payload.nr, 0),
                                          offset.type);
   struct brw_reg payload_value = retype(brw_vec1_grf(payload.nr + 1, 0),
                                         value.type);

   assert(offset.file == BRW_IMMEDIATE_VALUE);
   if (value.file == BRW_GENERAL_REGISTER_FILE) {
      value.width = BRW_WIDTH_1;
      value.hstride = BRW_HORIZONTAL_STRIDE_0;
      value.vstride = BRW_VERTICAL_STRIDE_0;
   } else {
      assert(value.file == BRW_IMMEDIATE_VALUE);
   }

   /* Trying to deal with setup of the params from the IR is crazy in the FS8
    * case, and we don't really care about squeezing every bit of performance
    * out of this path, so we just emit the MOVs from here.
    */
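   /* The payload consumed by brw_shader_time_add() is therefore two
    * consecutive GRFs: the buffer offset in the first register and the
    * value to accumulate in the second, exactly what the two MOVs below
    * fill in.
    */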
   brw_MOV(p, payload_offset, offset);
   brw_MOV(p, payload_value, value);
   brw_shader_time_add(p, payload,
                       prog_data->binding_table.shader_time_start);
   brw_pop_insn_state(p);

   brw_mark_surface_used(prog_data,
                         prog_data->binding_table.shader_time_start);
}

void
fs_generator::enable_debug(const char *shader_name)
{
   debug_flag = true;
   this->shader_name = shader_name;
}

int
fs_generator::generate_code(const cfg_t *cfg, int dispatch_width)
{
   /* Align to a 64-byte boundary by padding with NOPs. */
   while (p->next_insn_offset % 64)
      brw_NOP(p);
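
   /* Each uncompacted native instruction is 16 bytes, so next_insn_offset
    * can be at most 48 bytes past alignment here, i.e. this emits at most
    * three NOPs.
    */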

   this->dispatch_width = dispatch_width;

   int start_offset = p->next_insn_offset;
   int spill_count = 0, fill_count = 0;
   int loop_count = 0;

   struct annotation_info annotation;
   memset(&annotation, 0, sizeof(annotation));

   foreach_block_and_inst (block, fs_inst, inst, cfg) {
      struct brw_reg src[3], dst;
      unsigned int last_insn_offset = p->next_insn_offset;
      bool multiple_instructions_emitted = false;

      /* From the Broadwell PRM, Volume 7, "3D-Media-GPGPU", in the
       * "Register Region Restrictions" section: for BDW, SKL:
       *
       *   "A POW/FDIV operation must not be followed by an instruction
       *    that requires two destination registers."
       *
       * The documentation is often lacking annotations for Atom parts,
       * and empirically this affects CHV as well.
       */
      if (devinfo->gen >= 8 &&
          p->nr_insn > 1 &&
          brw_inst_opcode(devinfo, brw_last_inst) == BRW_OPCODE_MATH &&
          brw_inst_math_function(devinfo, brw_last_inst) == BRW_MATH_FUNCTION_POW &&
          inst->dst.component_size(inst->exec_size) > REG_SIZE) {
         brw_NOP(p);
         last_insn_offset = p->next_insn_offset;
      }
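
      /* E.g. a SIMD8 POW followed by a SIMD16 ALU instruction (two
       * destination GRFs) gets a NOP slipped in between as the workaround.
       */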

      if (unlikely(debug_flag))
         annotate(p->devinfo, &annotation, cfg, inst, p->next_insn_offset);

      /* If the instruction writes to more than one register, it needs to be
       * explicitly marked as compressed on Gen <= 5. On Gen >= 6 the
       * hardware figures out by itself what the right compression mode is,
       * but we still need to know whether the instruction is compressed to
       * set up the source register regions appropriately.
       *
       * XXX - This is wrong for instructions that write a single register but
       *       read more than one which should strictly speaking be treated as
       *       compressed. For instructions that don't write any registers it
       *       relies on the destination being a null register of the correct
       *       type and regioning so the instruction is considered compressed
       *       or not accordingly.
       */
      p->compressed = inst->dst.component_size(inst->exec_size) > REG_SIZE;
      brw_set_default_compression(p, p->compressed);
      brw_set_default_group(p, inst->force_sechalf ? 8 : 0);

      for (unsigned int i = 0; i < inst->sources; i++) {
         src[i] = brw_reg_from_fs_reg(inst, &inst->src[i], devinfo->gen,
                                      p->compressed);

         /* The accumulator result appears to get used for the
          * conditional modifier generation. When negating a UD
          * value, there is a 33rd bit generated for the sign in the
          * accumulator value, so now you can't check, for example,
          * equality with a 32-bit value. See piglit fs-op-neg-uvec4.
          */
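         /* Concretely: negating ud 1 yields -1, which is 0x1FFFFFFFF in the
          * 33-bit internal result, so a .z comparison against 0xFFFFFFFF
          * would fail even though the stored 32-bit values match.
          */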
         assert(!inst->conditional_mod ||
                inst->src[i].type != BRW_REGISTER_TYPE_UD ||
                !inst->src[i].negate);
      }
      dst = brw_reg_from_fs_reg(inst, &inst->dst, devinfo->gen,
                                p->compressed);

      brw_set_default_access_mode(p, BRW_ALIGN_1);
      brw_set_default_predicate_control(p, inst->predicate);
      brw_set_default_predicate_inverse(p, inst->predicate_inverse);
      brw_set_default_flag_reg(p, 0, inst->flag_subreg);
      brw_set_default_saturate(p, inst->saturate);
      brw_set_default_mask_control(p, inst->force_writemask_all);
      brw_set_default_acc_write_control(p, inst->writes_accumulator);
      brw_set_default_exec_size(p, cvt(inst->exec_size) - 1);

      assert(inst->force_writemask_all || inst->exec_size >= 8);
      assert(inst->base_mrf + inst->mlen <= BRW_MAX_MRF(devinfo->gen));
      assert(inst->mlen <= BRW_MAX_MSG_LENGTH);

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_AVG:
         brw_AVG(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_MACH(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_LINE:
         brw_LINE(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_MAD:
         assert(devinfo->gen >= 6);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_MAD(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_LRP:
         assert(devinfo->gen >= 6);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_LRP(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_F32TO16:
         assert(devinfo->gen >= 7);
         brw_F32TO16(p, dst, src[0]);
         break;
      case BRW_OPCODE_F16TO32:
         assert(devinfo->gen >= 7);
         brw_F16TO32(p, dst, src[0]);
         break;
      case BRW_OPCODE_CMP:
         if (inst->exec_size >= 16 && devinfo->gen == 7 && !devinfo->is_haswell &&
             dst.file == BRW_ARCHITECTURE_REGISTER_FILE) {
            /* For unknown reasons the WaCMPInstFlagDepClearedEarly workaround
             * implemented in the compiler is not sufficient. Overriding the
             * type when the destination is the null register is necessary but
             * not sufficient by itself.
             */
            assert(dst.nr == BRW_ARF_NULL);
            dst.type = BRW_REGISTER_TYPE_D;
         }
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_BFREV:
         assert(devinfo->gen >= 7);
         /* BFREV only supports UD type for src and dst. */
         brw_BFREV(p, retype(dst, BRW_REGISTER_TYPE_UD),
                   retype(src[0], BRW_REGISTER_TYPE_UD));
         break;
      case BRW_OPCODE_FBH:
         assert(devinfo->gen >= 7);
         /* FBH only supports UD type for dst. */
         brw_FBH(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_FBL:
         assert(devinfo->gen >= 7);
         /* FBL only supports UD type for dst. */
         brw_FBL(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_CBIT:
         assert(devinfo->gen >= 7);
         /* CBIT only supports UD type for dst. */
         brw_CBIT(p, retype(dst, BRW_REGISTER_TYPE_UD), src[0]);
         break;
      case BRW_OPCODE_ADDC:
         assert(devinfo->gen >= 7);
         brw_ADDC(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SUBB:
         assert(devinfo->gen >= 7);
         brw_SUBB(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MAC:
         brw_MAC(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_BFE:
         assert(devinfo->gen >= 7);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_BFE(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_BFI1:
         assert(devinfo->gen >= 7);
         brw_BFI1(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_BFI2:
         assert(devinfo->gen >= 7);
         brw_set_default_access_mode(p, BRW_ALIGN_16);
         brw_BFI2(p, dst, src[0], src[1], src[2]);
         break;

      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(devinfo->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            brw_IF(p, brw_inst_exec_size(devinfo, p->current));
         }
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         break;

      case BRW_OPCODE_DO:
         brw_DO(p, brw_inst_exec_size(devinfo, p->current));
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case BRW_OPCODE_CONTINUE:
         brw_CONT(p);
         brw_set_default_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE:
         brw_WHILE(p);
         loop_count++;
         break;

      case SHADER_OPCODE_RCP:
      case SHADER_OPCODE_RSQ:
      case SHADER_OPCODE_SQRT:
      case SHADER_OPCODE_EXP2:
      case SHADER_OPCODE_LOG2:
      case SHADER_OPCODE_SIN:
      case SHADER_OPCODE_COS:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 6) {
            assert(inst->mlen == 0);
            assert(devinfo->gen >= 7 || inst->exec_size == 8);
            gen6_math(p, dst, brw_math_function(inst->opcode),
                      src[0], brw_null_reg());
         } else {
            assert(inst->mlen >= 1);
            assert(devinfo->gen == 5 || devinfo->is_g4x || inst->exec_size == 8);
            gen4_math(p, dst,
                      brw_math_function(inst->opcode),
                      inst->base_mrf, src[0],
                      BRW_MATH_PRECISION_FULL);
         }
         break;
      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
      case SHADER_OPCODE_POW:
         assert(inst->conditional_mod == BRW_CONDITIONAL_NONE);
         if (devinfo->gen >= 6) {
            assert(inst->mlen == 0);
            assert((devinfo->gen >= 7 && inst->opcode == SHADER_OPCODE_POW) ||
                   inst->exec_size == 8);
            gen6_math(p, dst, brw_math_function(inst->opcode), src[0], src[1]);
         } else {
            assert(inst->mlen >= 1);
            assert(inst->exec_size == 8);
            gen4_math(p, dst, brw_math_function(inst->opcode),
                      inst->base_mrf, src[0],
                      BRW_MATH_PRECISION_FULL);
         }
         break;
      case FS_OPCODE_CINTERP:
         brw_MOV(p, dst, src[0]);
         break;
      case FS_OPCODE_LINTERP:
         generate_linterp(inst, dst, src);
         break;
      case FS_OPCODE_PIXEL_X:
         assert(src[0].type == BRW_REGISTER_TYPE_UW);
         src[0].subnr = 0 * type_sz(src[0].type);
         brw_MOV(p, dst, stride(src[0], 8, 4, 1));
         break;
      case FS_OPCODE_PIXEL_Y:
         assert(src[0].type == BRW_REGISTER_TYPE_UW);
         src[0].subnr = 4 * type_sz(src[0].type);
         brw_MOV(p, dst, stride(src[0], 8, 4, 1));
         break;
      case FS_OPCODE_GET_BUFFER_SIZE:
         generate_get_buffer_size(inst, dst, src[0], src[1]);
         break;
      case SHADER_OPCODE_TEX:
      case FS_OPCODE_TXB:
      case SHADER_OPCODE_TXD:
      case SHADER_OPCODE_TXF:
      case SHADER_OPCODE_TXF_LZ:
      case SHADER_OPCODE_TXF_CMS:
      case SHADER_OPCODE_TXF_CMS_W:
      case SHADER_OPCODE_TXF_UMS:
      case SHADER_OPCODE_TXF_MCS:
      case SHADER_OPCODE_TXL:
      case SHADER_OPCODE_TXL_LZ:
      case SHADER_OPCODE_TXS:
      case SHADER_OPCODE_LOD:
      case SHADER_OPCODE_TG4:
      case SHADER_OPCODE_TG4_OFFSET:
      case SHADER_OPCODE_SAMPLEINFO:
         generate_tex(inst, dst, src[0], src[1], src[2]);
         break;
      case FS_OPCODE_DDX_COARSE:
      case FS_OPCODE_DDX_FINE:
         generate_ddx(inst->opcode, dst, src[0]);
         break;
      case FS_OPCODE_DDY_COARSE:
      case FS_OPCODE_DDY_FINE:
         generate_ddy(inst->opcode, dst, src[0]);
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         generate_scratch_write(inst, src[0]);
         spill_count++;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
         generate_scratch_read(inst, dst);
         fill_count++;
         break;

      case SHADER_OPCODE_GEN7_SCRATCH_READ:
         generate_scratch_read_gen7(inst, dst);
         fill_count++;
         break;

      case SHADER_OPCODE_MOV_INDIRECT:
         generate_mov_indirect(inst, dst, src[0], src[1]);
         break;

      case SHADER_OPCODE_URB_READ_SIMD8:
      case SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT:
         generate_urb_read(inst, dst, src[0]);
         break;

      case SHADER_OPCODE_URB_WRITE_SIMD8:
      case SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT:
      case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED:
      case SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT:
         generate_urb_write(inst, src[0]);
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
         assert(inst->force_writemask_all);
         generate_uniform_pull_constant_load(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
         assert(inst->force_writemask_all);
         generate_uniform_pull_constant_load_gen7(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN4:
         generate_varying_pull_constant_load_gen4(inst, dst, src[0]);
         break;

      case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
         generate_varying_pull_constant_load_gen7(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_REP_FB_WRITE:
      case FS_OPCODE_FB_WRITE:
         generate_fb_write(inst, src[0]);
         break;

      case FS_OPCODE_MOV_DISPATCH_TO_FLAGS:
         generate_mov_dispatch_to_flags(inst);
         break;

      case FS_OPCODE_DISCARD_JUMP:
         generate_discard_jump(inst);
         break;

      case SHADER_OPCODE_SHADER_TIME_ADD:
         generate_shader_time_add(inst, src[0], src[1], src[2]);
         break;

      case SHADER_OPCODE_UNTYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_atomic(p, dst, src[0], src[1], src[2].ud,
                            inst->mlen, !inst->dst.is_null());
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_read(p, dst, src[0], src[1],
                                  inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_untyped_surface_write(p, src[0], src[1],
                                   inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_ATOMIC:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_atomic(p, dst, src[0], src[1],
                          src[2].ud, inst->mlen, !inst->dst.is_null());
         break;

      case SHADER_OPCODE_TYPED_SURFACE_READ:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_read(p, dst, src[0], src[1],
                                inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_TYPED_SURFACE_WRITE:
         assert(src[2].file == BRW_IMMEDIATE_VALUE);
         brw_typed_surface_write(p, src[0], src[1], inst->mlen, src[2].ud);
         break;

      case SHADER_OPCODE_MEMORY_FENCE:
         brw_memory_fence(p, dst);
         break;

      case FS_OPCODE_SET_SIMD4X2_OFFSET:
         generate_set_simd4x2_offset(inst, dst, src[0]);
         break;

      case SHADER_OPCODE_FIND_LIVE_CHANNEL:
         brw_find_live_channel(p, dst);
         break;

      case SHADER_OPCODE_BROADCAST:
         brw_broadcast(p, dst, src[0], src[1]);
         break;

      case FS_OPCODE_SET_SAMPLE_ID:
         generate_set_sample_id(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_PACK_HALF_2x16_SPLIT:
         generate_pack_half_2x16_split(inst, dst, src[0], src[1]);
         break;

      case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
      case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
         generate_unpack_half_2x16_split(inst, dst, src[0]);
         break;

      case FS_OPCODE_PLACEHOLDER_HALT:
         /* This is the place where the final HALT needs to be inserted if
          * we've emitted any discards. If not, this will emit no code.
          */
         if (!patch_discard_jumps_to_fb_writes()) {
            if (unlikely(debug_flag)) {
               annotation.ann_count--;
            }
         }
         break;

      case FS_OPCODE_INTERPOLATE_AT_CENTROID:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_CENTROID);
         break;

      case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_SAMPLE);
         break;

      case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_SHARED_OFFSET);
         break;

      case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
         generate_pixel_interpolator_query(inst, dst, src[0], src[1],
                                           GEN7_PIXEL_INTERPOLATOR_LOC_PER_SLOT_OFFSET);
         break;

      case CS_OPCODE_CS_TERMINATE:
         generate_cs_terminate(inst, src[0]);
         break;

      case SHADER_OPCODE_BARRIER:
         generate_barrier(inst, src[0]);
         break;

      default:
         unreachable("Unsupported opcode");

      case SHADER_OPCODE_LOAD_PAYLOAD:
         unreachable("Should be lowered by lower_load_payload()");
      }

      if (multiple_instructions_emitted)
         continue;

      if (inst->no_dd_clear || inst->no_dd_check || inst->conditional_mod) {
         assert(p->next_insn_offset == last_insn_offset + 16 ||
                !"conditional_mod, no_dd_check, or no_dd_clear set for IR "
                 "emitting more than 1 instruction");

         brw_inst *last = &p->store[last_insn_offset / 16];
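
         /* p->store is an array of 16-byte native instructions, so the byte
          * offset divided by 16 indexes the instruction we just emitted; the
          * assert above guarantees exactly one was emitted.
          */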

         if (inst->conditional_mod)
            brw_inst_set_cond_modifier(p->devinfo, last, inst->conditional_mod);
         brw_inst_set_no_dd_clear(p->devinfo, last, inst->no_dd_clear);
         brw_inst_set_no_dd_check(p->devinfo, last, inst->no_dd_check);
      }
   }

   brw_set_uip_jip(p);
   annotation_finalize(&annotation, p->next_insn_offset);

#ifndef NDEBUG
   bool validated = brw_validate_instructions(p, start_offset, &annotation);
#else
   if (unlikely(debug_flag))
      brw_validate_instructions(p, start_offset, &annotation);
#endif

   int before_size = p->next_insn_offset - start_offset;
   brw_compact_instructions(p, start_offset, annotation.ann_count,
                            annotation.ann);
   int after_size = p->next_insn_offset - start_offset;

   if (unlikely(debug_flag)) {
      fprintf(stderr, "Native code for %s\n"
              "SIMD%d shader: %d instructions. %d loops. %u cycles. "
              "%d:%d spills:fills. Promoted %u constants. "
              "Compacted %d to %d bytes (%.0f%%)\n",
              shader_name, dispatch_width, before_size / 16,
              loop_count, cfg->cycle_count, spill_count, fill_count,
              promoted_constants, before_size, after_size,
              100.0f * (before_size - after_size) / before_size);

      dump_assembly(p->store, annotation.ann_count, annotation.ann,
                    p->devinfo);
      ralloc_free(annotation.mem_ctx);
   }
   assert(validated);

   compiler->shader_debug_log(log_data,
                              "%s SIMD%d shader: %d inst, %d loops, %u cycles, "
                              "%d:%d spills:fills, Promoted %u constants, "
                              "compacted %d to %d bytes.",
                              _mesa_shader_stage_to_abbrev(stage),
                              dispatch_width, before_size / 16,
                              loop_count, cfg->cycle_count, spill_count,
                              fill_count, promoted_constants, before_size,
                              after_size);

   return start_offset;
}

const unsigned *
fs_generator::get_assembly(unsigned int *assembly_size)
{
   return brw_get_program(p, assembly_size);
}