Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001/*
Alyssa Rosenzweig11554462019-05-19 23:20:34 +00002 * Copyright (C) 2018-2019 Alyssa Rosenzweig <alyssa@rosenzweig.io>
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include <sys/types.h>
25#include <sys/stat.h>
26#include <sys/mman.h>
27#include <fcntl.h>
28#include <stdint.h>
29#include <stdlib.h>
30#include <stdio.h>
31#include <err.h>
32
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +010033#include "main/mtypes.h"
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +000034#include "compiler/glsl/glsl_to_nir.h"
35#include "compiler/nir_types.h"
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +000036#include "compiler/nir/nir_builder.h"
37#include "util/half_float.h"
Alyssa Rosenzweig213b6282019-06-18 09:02:20 -070038#include "util/u_math.h"
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +010039#include "util/u_debug.h"
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +000040#include "util/u_dynarray.h"
41#include "util/list.h"
43
44#include "midgard.h"
45#include "midgard_nir.h"
46#include "midgard_compile.h"
Alyssa Rosenzweig11554462019-05-19 23:20:34 +000047#include "midgard_ops.h"
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +000048#include "helpers.h"
Alyssa Rosenzweig11554462019-05-19 23:20:34 +000049#include "compiler.h"
Alyssa Rosenzweigfcf144d2019-11-19 20:55:42 -050050#include "midgard_quirks.h"
Icecream951e1eee92020-07-06 19:30:37 +120051#include "panfrost-quirks.h"
52#include "panfrost/util/pan_lower_framebuffer.h"
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +000053
54#include "disassemble.h"
55
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +010056static const struct debug_named_value debug_options[] = {
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -070057 {"msgs", MIDGARD_DBG_MSGS, "Print debug messages"},
58 {"shaders", MIDGARD_DBG_SHADERS, "Dump shaders in NIR and MIR"},
Alyssa Rosenzweig138e40d2019-07-08 16:42:29 -070059	{"shaderdb",  MIDGARD_DBG_SHADERDB,	"Print shader-db statistics"},
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -070060 DEBUG_NAMED_VALUE_END
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +010061};
62
63DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", debug_options, 0)
64
Alyssa Rosenzweig138e40d2019-07-08 16:42:29 -070065unsigned SHADER_DB_COUNT = 0;
66
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +010067int midgard_debug = 0;
68
69#define DBG(fmt, ...) \
70 do { if (midgard_debug & MIDGARD_DBG_MSGS) \
71 fprintf(stderr, "%s:%d: "fmt, \
72 __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -070073static midgard_block *
74create_empty_block(compiler_context *ctx)
75{
76 midgard_block *blk = rzalloc(ctx, midgard_block);
77
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -040078 blk->base.predecessors = _mesa_set_create(blk,
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -070079 _mesa_hash_pointer,
80 _mesa_key_pointer_equal);
81
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -040082 blk->base.name = ctx->block_source_count++;
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -070083
84 return blk;
85}
86
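/* Split emission at the current point: roughly, everything emitted after the
 * call lands in a fresh block that is wired up as a successor of the current
 * block, and emission continues in that new block, so nothing gets merged
 * across the barrier. */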
Alyssa Rosenzweigc0fb2602019-04-21 03:29:47 +000087static void
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -070088schedule_barrier(compiler_context *ctx)
89{
90 midgard_block *temp = ctx->after_block;
91 ctx->after_block = create_empty_block(ctx);
92 ctx->block_count++;
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -040093 list_addtail(&ctx->after_block->base.link, &ctx->blocks);
94 list_inithead(&ctx->after_block->base.instructions);
95 pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -070096 ctx->current_block = ctx->after_block;
97 ctx->after_block = temp;
98}
99
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000100/* Helpers to generate midgard_instructions using macro magic, since every
101 * driver seems to do it that way */
102
103#define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
Alyssa Rosenzweig56f9b472019-06-14 16:03:01 -0700104
Alyssa Rosenzweig714eba82020-04-27 19:01:40 -0400105#define M_LOAD_STORE(name, store, T) \
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000106 static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
107 midgard_instruction i = { \
108 .type = TAG_LOAD_STORE_4, \
Alyssa Rosenzweigf8b18a42019-07-01 18:51:48 -0700109 .mask = 0xF, \
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -0700110 .dest = ~0, \
Alyssa Rosenzweigccbc9a42019-12-19 10:35:18 -0500111 .src = { ~0, ~0, ~0, ~0 }, \
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -0400112 .swizzle = SWIZZLE_IDENTITY_4, \
Italo Nicolabea6a652020-07-23 19:24:39 +0000113 .op = midgard_op_##name, \
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000114 .load_store = { \
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000115 .address = address \
116 } \
117 }; \
Alyssa Rosenzweigd4bcca12019-08-02 15:25:02 -0700118 \
Alyssa Rosenzweig714eba82020-04-27 19:01:40 -0400119 if (store) { \
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -0700120 i.src[0] = ssa; \
Alyssa Rosenzweig714eba82020-04-27 19:01:40 -0400121 i.src_types[0] = T; \
Alyssa Rosenzweig9915bb22020-05-07 10:12:38 -0400122 i.dest_type = T; \
Alyssa Rosenzweig714eba82020-04-27 19:01:40 -0400123 } else { \
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -0700124 i.dest = ssa; \
Alyssa Rosenzweig714eba82020-04-27 19:01:40 -0400125 i.dest_type = T; \
126 } \
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000127 return i; \
128 }
129
Alyssa Rosenzweig714eba82020-04-27 19:01:40 -0400130#define M_LOAD(name, T) M_LOAD_STORE(name, false, T)
131#define M_STORE(name, T) M_LOAD_STORE(name, true, T)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000132
Alyssa Rosenzweig714eba82020-04-27 19:01:40 -0400133M_LOAD(ld_attr_32, nir_type_uint32);
134M_LOAD(ld_vary_32, nir_type_uint32);
135M_LOAD(ld_ubo_int4, nir_type_uint32);
136M_LOAD(ld_int4, nir_type_uint32);
137M_STORE(st_int4, nir_type_uint32);
138M_LOAD(ld_color_buffer_32u, nir_type_uint32);
Alyssa Rosenzweig36af05b2020-06-01 14:14:33 -0400139M_LOAD(ld_color_buffer_as_fp16, nir_type_float16);
Icecream952fbe7ca2020-07-09 23:44:41 +1200140M_LOAD(ld_color_buffer_as_fp32, nir_type_float32);
Alyssa Rosenzweig714eba82020-04-27 19:01:40 -0400141M_STORE(st_vary_32, nir_type_uint32);
142M_LOAD(ld_cubemap_coords, nir_type_uint32);
143M_LOAD(ld_compute_id, nir_type_uint32);
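/* Each M_LOAD/M_STORE line above expands (via M_LOAD_STORE) into a small
 * constructor such as m_ld_attr_32(ssa, address) or m_st_vary_32(ssa, address),
 * returning a load/store midgard_instruction with the op, mask, identity
 * swizzle and types prefilled. An illustrative use, mirroring the UBO path
 * further down:
 *
 *    midgard_instruction ins = m_ld_ubo_int4(dest, 0);
 *    ins.load_store.arg_1 = index;
 *    emit_mir_instruction(ctx, ins);
 */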
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000144
145static midgard_instruction
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000146v_branch(bool conditional, bool invert)
147{
148 midgard_instruction ins = {
149 .type = TAG_ALU_4,
Alyssa Rosenzweig5abb7b52019-02-17 22:09:09 +0000150 .unit = ALU_ENAB_BRANCH,
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000151 .compact_branch = true,
152 .branch = {
153 .conditional = conditional,
154 .invert_conditional = invert
Alyssa Rosenzweig29416a82019-07-30 12:20:24 -0700155 },
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -0700156 .dest = ~0,
Alyssa Rosenzweigccbc9a42019-12-19 10:35:18 -0500157 .src = { ~0, ~0, ~0, ~0 },
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000158 };
159
160 return ins;
161}
162
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000163static void
164attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
165{
166 ins->has_constants = true;
167 memcpy(&ins->constants, constants, 16);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000168}
169
170static int
Timothy Arceri035759b2019-03-29 12:39:48 +1100171glsl_type_size(const struct glsl_type *type, bool bindless)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000172{
173 return glsl_count_attribute_slots(type, false);
174}
175
176/* Lower fdot2 to a vector multiplication followed by channel addition */
Icecream9527516ba2020-09-05 17:00:37 +1200177static bool
178midgard_nir_lower_fdot2_instr(nir_builder *b, nir_instr *instr, void *data)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000179{
Icecream9527516ba2020-09-05 17:00:37 +1200180 if (instr->type != nir_instr_type_alu)
181 return false;
182
183 nir_alu_instr *alu = nir_instr_as_alu(instr);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000184 if (alu->op != nir_op_fdot2)
Icecream9527516ba2020-09-05 17:00:37 +1200185 return false;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000186
187 b->cursor = nir_before_instr(&alu->instr);
188
189 nir_ssa_def *src0 = nir_ssa_for_alu_src(b, alu, 0);
190 nir_ssa_def *src1 = nir_ssa_for_alu_src(b, alu, 1);
191
192 nir_ssa_def *product = nir_fmul(b, src0, src1);
193
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -0700194 nir_ssa_def *sum = nir_fadd(b,
195 nir_channel(b, product, 0),
196 nir_channel(b, product, 1));
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000197
198 /* Replace the fdot2 with this sum */
199 nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(sum));
Icecream9527516ba2020-09-05 17:00:37 +1200200
201 return true;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000202}
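
/* Illustratively, the lowering above rewrites (sketch, NIR-like syntax):
 *
 *    d = fdot2(a, b)
 *
 * into
 *
 *    p = fmul(a, b)
 *    d = fadd(p.x, p.y)
 *
 * since fdot2 has no direct ALU case in emit_alu (only fdot3/fdot4 do). */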
203
204static bool
205midgard_nir_lower_fdot2(nir_shader *shader)
206{
Icecream9527516ba2020-09-05 17:00:37 +1200207 return nir_shader_instructions_pass(shader,
208 midgard_nir_lower_fdot2_instr,
209 nir_metadata_block_index | nir_metadata_dominance,
210 NULL);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000211}
212
Icecream957534a312020-06-06 15:39:22 +1200213static const nir_variable *
Jason Ekstrand94f0bae2020-07-20 16:07:11 -0500214search_var(nir_shader *nir, nir_variable_mode mode, unsigned driver_loc)
Icecream957534a312020-06-06 15:39:22 +1200215{
Jason Ekstrand94f0bae2020-07-20 16:07:11 -0500216 nir_foreach_variable_with_modes(var, nir, mode) {
Icecream957534a312020-06-06 15:39:22 +1200217 if (var->data.driver_location == driver_loc)
218 return var;
219 }
220
221 return NULL;
222}
223
Icecream95d37e9012020-06-06 17:25:08 +1200224/* Midgard can write all of color, depth and stencil in a single writeout
225 * operation, so we merge depth/stencil stores with color stores.
226 * If there are no color stores, we add a write to the "depth RT".
227 */
228static bool
229midgard_nir_lower_zs_store(nir_shader *nir)
230{
231 if (nir->info.stage != MESA_SHADER_FRAGMENT)
232 return false;
233
234 nir_variable *z_var = NULL, *s_var = NULL;
235
Jason Ekstrand2956d532020-07-18 18:24:25 -0500236 nir_foreach_shader_out_variable(var, nir) {
Icecream95d37e9012020-06-06 17:25:08 +1200237 if (var->data.location == FRAG_RESULT_DEPTH)
238 z_var = var;
239 else if (var->data.location == FRAG_RESULT_STENCIL)
240 s_var = var;
241 }
242
243 if (!z_var && !s_var)
244 return false;
245
246 bool progress = false;
247
248 nir_foreach_function(function, nir) {
249 if (!function->impl) continue;
250
251 nir_intrinsic_instr *z_store = NULL, *s_store = NULL;
252
253 nir_foreach_block(block, function->impl) {
254 nir_foreach_instr_safe(instr, block) {
255 if (instr->type != nir_instr_type_intrinsic)
256 continue;
257
258 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
259 if (intr->intrinsic != nir_intrinsic_store_output)
260 continue;
261
262 if (z_var && nir_intrinsic_base(intr) == z_var->data.driver_location) {
263 assert(!z_store);
264 z_store = intr;
265 }
266
267 if (s_var && nir_intrinsic_base(intr) == s_var->data.driver_location) {
268 assert(!s_store);
269 s_store = intr;
270 }
271 }
272 }
273
274 if (!z_store && !s_store) continue;
275
276 bool replaced = false;
277
278 nir_foreach_block(block, function->impl) {
279 nir_foreach_instr_safe(instr, block) {
280 if (instr->type != nir_instr_type_intrinsic)
281 continue;
282
283 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
284 if (intr->intrinsic != nir_intrinsic_store_output)
285 continue;
286
Jason Ekstrand94f0bae2020-07-20 16:07:11 -0500287 const nir_variable *var = search_var(nir, nir_var_shader_out, nir_intrinsic_base(intr));
Icecream95d37e9012020-06-06 17:25:08 +1200288 assert(var);
289
290 if (var->data.location != FRAG_RESULT_COLOR &&
291 var->data.location < FRAG_RESULT_DATA0)
292 continue;
293
Icecream95334dab02020-07-10 23:28:21 +1200294 if (var->data.index)
295 continue;
296
Icecream95d37e9012020-06-06 17:25:08 +1200297 assert(nir_src_is_const(intr->src[1]) && "no indirect outputs");
298
299 nir_builder b;
300 nir_builder_init(&b, function->impl);
301
302 assert(!z_store || z_store->instr.block == instr->block);
303 assert(!s_store || s_store->instr.block == instr->block);
304 b.cursor = nir_after_block_before_jump(instr->block);
305
306 nir_intrinsic_instr *combined_store;
307 combined_store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_combined_output_pan);
308
309 combined_store->num_components = intr->src[0].ssa->num_components;
310
311 nir_intrinsic_set_base(combined_store, nir_intrinsic_base(intr));
312
313 unsigned writeout = PAN_WRITEOUT_C;
314 if (z_store)
315 writeout |= PAN_WRITEOUT_Z;
316 if (s_store)
317 writeout |= PAN_WRITEOUT_S;
318
319 nir_intrinsic_set_component(combined_store, writeout);
320
321 struct nir_ssa_def *zero = nir_imm_int(&b, 0);
322
323 struct nir_ssa_def *src[4] = {
324 intr->src[0].ssa,
325 intr->src[1].ssa,
326 z_store ? z_store->src[0].ssa : zero,
327 s_store ? s_store->src[0].ssa : zero,
328 };
329
330 for (int i = 0; i < 4; ++i)
331 combined_store->src[i] = nir_src_for_ssa(src[i]);
332
333 nir_builder_instr_insert(&b, &combined_store->instr);
334
335 nir_instr_remove(instr);
336
337 replaced = true;
338 }
339 }
340
341 /* Insert a store to the depth RT (0xff) if needed */
342 if (!replaced) {
343 nir_builder b;
344 nir_builder_init(&b, function->impl);
345
346 nir_block *block = NULL;
347 if (z_store && s_store)
348 assert(z_store->instr.block == s_store->instr.block);
349
350 if (z_store)
351 block = z_store->instr.block;
352 else
353 block = s_store->instr.block;
354
355 b.cursor = nir_after_block_before_jump(block);
356
357 nir_intrinsic_instr *combined_store;
358 combined_store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_store_combined_output_pan);
359
360 combined_store->num_components = 4;
361
Icecream9518059f42020-07-08 16:00:51 +1200362 unsigned base;
363 if (z_store)
364 base = nir_intrinsic_base(z_store);
365 else
366 base = nir_intrinsic_base(s_store);
367 nir_intrinsic_set_base(combined_store, base);
Icecream95d37e9012020-06-06 17:25:08 +1200368
369 unsigned writeout = 0;
370 if (z_store)
371 writeout |= PAN_WRITEOUT_Z;
372 if (s_store)
373 writeout |= PAN_WRITEOUT_S;
374
375 nir_intrinsic_set_component(combined_store, writeout);
376
377 struct nir_ssa_def *zero = nir_imm_int(&b, 0);
378
379 struct nir_ssa_def *src[4] = {
380 nir_imm_vec4(&b, 0, 0, 0, 0),
381 zero,
382 z_store ? z_store->src[0].ssa : zero,
383 s_store ? s_store->src[0].ssa : zero,
384 };
385
386 for (int i = 0; i < 4; ++i)
387 combined_store->src[i] = nir_src_for_ssa(src[i]);
388
389 nir_builder_instr_insert(&b, &combined_store->instr);
390 }
391
392 if (z_store)
393 nir_instr_remove(&z_store->instr);
394
395 if (s_store)
396 nir_instr_remove(&s_store->instr);
397
398 nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
399 progress = true;
400 }
401
402 return progress;
403}
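
/* A rough before/after for the pass above, in NIR-intrinsic terms:
 *
 *    store_output(color, base = RT0)
 *    store_output(z, base = depth output)
 *
 * becomes a single
 *
 *    store_combined_output_pan(color, offset, z, 0,
 *                              component = PAN_WRITEOUT_C | PAN_WRITEOUT_Z)
 *
 * and, when there is no color store at all, a vec4(0) color source is used
 * with only the Z/S writeout bits set and the depth/stencil base. */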
404
Icecream950ff62632020-07-06 23:52:40 +1200405/* Real writeout stores, which break execution, need to be moved to after
406 * dual-source stores, which are just standard register writes. */
407static bool
408midgard_nir_reorder_writeout(nir_shader *nir)
409{
410 bool progress = false;
411
412 nir_foreach_function(function, nir) {
413 if (!function->impl) continue;
414
415 nir_foreach_block(block, function->impl) {
416 nir_instr *last_writeout = NULL;
417
418 nir_foreach_instr_reverse_safe(instr, block) {
419 if (instr->type != nir_instr_type_intrinsic)
420 continue;
421
422 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
423 if (intr->intrinsic != nir_intrinsic_store_output)
424 continue;
425
Jason Ekstrand94f0bae2020-07-20 16:07:11 -0500426 const nir_variable *var = search_var(nir, nir_var_shader_out, nir_intrinsic_base(intr));
Icecream950ff62632020-07-06 23:52:40 +1200427
428 if (var->data.index) {
429 if (!last_writeout)
430 last_writeout = instr;
431 continue;
432 }
433
434 if (!last_writeout)
435 continue;
436
437 /* This is a real store, so move it to after dual-source stores */
438 exec_node_remove(&instr->node);
439 exec_node_insert_after(&last_writeout->node, &instr->node);
440
441 progress = true;
442 }
443 }
444 }
445
446 return progress;
447}
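
/* Sketch of the reordering above, for a dual-source blend fragment shader:
 *
 *    store_output(src0)   (data.index == 0: real writeout, ends execution)
 *    store_output(src1)   (data.index == 1: dual-source, plain register write)
 *
 * is rewritten so the dual-source store comes first:
 *
 *    store_output(src1)
 *    store_output(src0)
 */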
448
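/* Filter for the nir_lower_alu_to_scalar() call below: only ALU ops with a
 * 64-bit destination and the high-multiply ops get scalarized; everything
 * else stays vectorized. */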
Alyssa Rosenzweig2486fe62020-08-27 14:55:11 -0400449static bool
450mdg_is_64(const nir_instr *instr, const void *_unused)
451{
452 const nir_alu_instr *alu = nir_instr_as_alu(instr);
453
454 if (nir_dest_bit_size(alu->dest.dest) == 64)
455 return true;
456
457 switch (alu->op) {
458 case nir_op_umul_high:
459 case nir_op_imul_high:
460 return true;
461 default:
462 return false;
463 }
464}
465
Alyssa Rosenzweiga2f1a062019-07-08 12:40:34 -0700466/* Flushes undefined values to zero */
467
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000468static void
Alyssa Rosenzweig7c793a42020-05-22 16:23:06 -0400469optimise_nir(nir_shader *nir, unsigned quirks, bool is_blend)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000470{
471 bool progress;
Ian Romanickd41cdef2018-08-18 16:42:04 -0700472 unsigned lower_flrp =
473 (nir->options->lower_flrp16 ? 16 : 0) |
474 (nir->options->lower_flrp32 ? 32 : 0) |
475 (nir->options->lower_flrp64 ? 64 : 0);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000476
477 NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
Rhys Perry8b98d092019-02-05 15:56:24 +0000478 NIR_PASS(progress, nir, nir_lower_idiv, nir_lower_idiv_fast);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000479
Alyssa Rosenzweig44a6c382019-08-14 08:44:40 -0700480 nir_lower_tex_options lower_tex_options = {
481 .lower_txs_lod = true,
Alyssa Rosenzweig4c43b352019-11-21 13:40:00 -0500482 .lower_txp = ~0,
483 .lower_tex_without_implicit_lod =
484 (quirks & MIDGARD_EXPLICIT_LOD),
Alyssa Rosenzweig7dab5742020-08-28 09:48:38 -0400485 .lower_tg4_broadcom_swizzle = true,
Alyssa Rosenzweigc57337b2019-12-19 11:12:50 -0500486
487 /* TODO: we have native gradient.. */
488 .lower_txd = true,
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000489 };
490
Alyssa Rosenzweig44a6c382019-08-14 08:44:40 -0700491 NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000492
Alyssa Rosenzweigc57337b2019-12-19 11:12:50 -0500493 /* Must lower fdot2 after tex is lowered */
494 NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
495
Alyssa Rosenzweigbda2bb32019-11-21 08:45:27 -0500496 /* T720 is broken. */
497
498 if (quirks & MIDGARD_BROKEN_LOD)
499 NIR_PASS_V(nir, midgard_nir_lod_errata);
500
Alyssa Rosenzweigc495c6c2020-05-12 19:07:48 -0400501 NIR_PASS(progress, nir, midgard_nir_lower_algebraic_early);
502
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000503 do {
504 progress = false;
505
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000506 NIR_PASS(progress, nir, nir_lower_var_copies);
507 NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
508
509 NIR_PASS(progress, nir, nir_copy_prop);
Boris Brezillon440b0d62020-01-06 14:31:38 +0100510 NIR_PASS(progress, nir, nir_opt_remove_phis);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000511 NIR_PASS(progress, nir, nir_opt_dce);
512 NIR_PASS(progress, nir, nir_opt_dead_cf);
513 NIR_PASS(progress, nir, nir_opt_cse);
514 NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
515 NIR_PASS(progress, nir, nir_opt_algebraic);
516 NIR_PASS(progress, nir, nir_opt_constant_folding);
Ian Romanickd41cdef2018-08-18 16:42:04 -0700517
518 if (lower_flrp != 0) {
Ian Romanick1f1007a2019-05-08 07:32:43 -0700519 bool lower_flrp_progress = false;
Ian Romanickd41cdef2018-08-18 16:42:04 -0700520 NIR_PASS(lower_flrp_progress,
521 nir,
522 nir_lower_flrp,
523 lower_flrp,
Marek Olšákac55b1a2020-07-22 22:13:16 -0400524 false /* always_precise */);
Ian Romanickd41cdef2018-08-18 16:42:04 -0700525 if (lower_flrp_progress) {
526 NIR_PASS(progress, nir,
527 nir_opt_constant_folding);
528 progress = true;
529 }
530
531 /* Nothing should rematerialize any flrps, so we only
532 * need to do this lowering once.
533 */
534 lower_flrp = 0;
535 }
536
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000537 NIR_PASS(progress, nir, nir_opt_undef);
Alyssa Rosenzweiga2f1a062019-07-08 12:40:34 -0700538 NIR_PASS(progress, nir, nir_undef_to_zero);
539
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000540 NIR_PASS(progress, nir, nir_opt_loop_unroll,
541 nir_var_shader_in |
542 nir_var_shader_out |
543 nir_var_function_temp);
544
Eric Anholtf25e1692020-08-27 12:49:13 -0700545 NIR_PASS(progress, nir, nir_opt_vectorize, NULL, NULL);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000546 } while (progress);
547
Alyssa Rosenzweig2486fe62020-08-27 14:55:11 -0400548 NIR_PASS_V(nir, nir_lower_alu_to_scalar, mdg_is_64, NULL);
549
Alyssa Rosenzweigd838cb92020-06-16 13:07:02 -0400550 /* Run after opts so it can hit more */
551 if (!is_blend)
552 NIR_PASS(progress, nir, nir_fuse_io_16);
553
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000554 /* Must be run at the end to prevent creation of fsin/fcos ops */
555 NIR_PASS(progress, nir, midgard_nir_scale_trig);
556
557 do {
558 progress = false;
559
560 NIR_PASS(progress, nir, nir_opt_dce);
561 NIR_PASS(progress, nir, nir_opt_algebraic);
562 NIR_PASS(progress, nir, nir_opt_constant_folding);
563 NIR_PASS(progress, nir, nir_copy_prop);
564 } while (progress);
565
566 NIR_PASS(progress, nir, nir_opt_algebraic_late);
Alyssa Rosenzweig211dee42020-04-29 20:27:16 -0400567 NIR_PASS(progress, nir, nir_opt_algebraic_distribute_src_mods);
Alyssa Rosenzweig726f0262019-05-07 02:52:08 +0000568
569 /* We implement booleans as 32-bit 0/~0 */
570 NIR_PASS(progress, nir, nir_lower_bool_to_int32);
571
572 /* Now that booleans are lowered, we can run out late opts */
Alyssa Rosenzweigeffe6fb02019-03-25 02:49:04 +0000573 NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
Alyssa Rosenzweig449e5de2020-04-30 13:46:35 -0400574 NIR_PASS(progress, nir, midgard_nir_cancel_inot);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000575
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000576 NIR_PASS(progress, nir, nir_copy_prop);
577 NIR_PASS(progress, nir, nir_opt_dce);
578
579 /* Take us out of SSA */
580 NIR_PASS(progress, nir, nir_lower_locals_to_regs);
581 NIR_PASS(progress, nir, nir_convert_from_ssa, true);
582
583 /* We are a vector architecture; write combine where possible */
584 NIR_PASS(progress, nir, nir_move_vec_src_uses_to_dest);
585 NIR_PASS(progress, nir, nir_lower_vec_to_movs);
586
587 NIR_PASS(progress, nir, nir_opt_dce);
588}
589
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000590/* Do not actually emit a load; instead, cache the constant for inlining */
591
592static void
593emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
594{
595 nir_ssa_def def = instr->def;
596
Boris Brezillon15c92d12020-01-20 15:00:57 +0100597 midgard_constants *consts = rzalloc(NULL, midgard_constants);
598
599 assert(instr->def.num_components * instr->def.bit_size <= sizeof(*consts) * 8);
600
601#define RAW_CONST_COPY(bits) \
602 nir_const_value_to_array(consts->u##bits, instr->value, \
603 instr->def.num_components, u##bits)
604
605 switch (instr->def.bit_size) {
606 case 64:
607 RAW_CONST_COPY(64);
608 break;
609 case 32:
610 RAW_CONST_COPY(32);
611 break;
612 case 16:
613 RAW_CONST_COPY(16);
614 break;
615 case 8:
616 RAW_CONST_COPY(8);
617 break;
618 default:
619 unreachable("Invalid bit_size for load_const instruction\n");
620 }
Alyssa Rosenzweig9beb3392019-07-26 11:30:06 -0700621
622 /* Shifted for SSA, +1 for off-by-one */
Boris Brezillon15c92d12020-01-20 15:00:57 +0100623 _mesa_hash_table_u64_insert(ctx->ssa_constants, (def.index << 1) + 1, consts);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000624}
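
/* The constant sits in ctx->ssa_constants keyed by the SSA index (shifted,
 * plus one, matching the off-by-one convention noted above). Later it is
 * either inlined into a consuming instruction or materialized by
 * emit_explicit_constant() below as a move from the embedded-constant
 * register. */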
625
Alyssa Rosenzweige1693012019-07-24 12:52:27 -0700626/* Normally constants are embedded implicitly, but for I/O and such we have to
627 * explicitly emit a move with the constant source */
628
629static void
630emit_explicit_constant(compiler_context *ctx, unsigned node, unsigned to)
631{
632 void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, node + 1);
633
634 if (constant_value) {
Alyssa Rosenzweigc3a46e72019-10-30 16:29:28 -0400635 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), to);
Alyssa Rosenzweige1693012019-07-24 12:52:27 -0700636 attach_constants(ctx, &ins, constant_value, node + 1);
637 emit_mir_instruction(ctx, ins);
638 }
639}
640
Alyssa Rosenzweig726f0262019-05-07 02:52:08 +0000641static bool
642nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
643{
644 unsigned comp = src->swizzle[0];
645
646 for (unsigned c = 1; c < nr_components; ++c) {
647 if (src->swizzle[c] != comp)
648 return true;
649 }
650
651 return false;
652}
653
Italo Nicola8e221f52020-08-31 11:17:48 +0000654#define ATOMIC_CASE_IMPL(ctx, instr, nir, op, is_shared) \
655 case nir_intrinsic_##nir: \
656 emit_atomic(ctx, instr, is_shared, midgard_op_##op); \
657 break;
658
659#define ATOMIC_CASE(ctx, instr, nir, op) \
660 ATOMIC_CASE_IMPL(ctx, instr, shared_atomic_##nir, atomic_##op, true); \
661 ATOMIC_CASE_IMPL(ctx, instr, global_atomic_##nir, atomic_##op, false);
662
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +0000663#define ALU_CASE(nir, _op) \
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000664 case nir_op_##nir: \
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000665 op = midgard_alu_op_##_op; \
Alyssa Rosenzweig0ed8cca2019-07-01 17:35:25 -0700666 assert(src_bitsize == dst_bitsize); \
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000667 break;
Alyssa Rosenzweig195e2972019-06-19 07:23:27 -0700668
Alyssa Rosenzweig93513cd2020-05-25 14:19:11 -0400669#define ALU_CASE_RTZ(nir, _op) \
670 case nir_op_##nir: \
671 op = midgard_alu_op_##_op; \
672 roundmode = MIDGARD_RTZ; \
Alyssa Rosenzweig93513cd2020-05-25 14:19:11 -0400673 break;
674
Italo Nicolacea032a2020-09-23 05:41:38 +0000675#define ALU_CHECK_CMP() \
Alyssa Rosenzweig1108eaa2020-05-08 17:41:49 -0400676 assert(src_bitsize == 16 || src_bitsize == 32); \
677 assert(dst_bitsize == 16 || dst_bitsize == 32); \
678
Alyssa Rosenzweig195e2972019-06-19 07:23:27 -0700679#define ALU_CASE_BCAST(nir, _op, count) \
680 case nir_op_##nir: \
681 op = midgard_alu_op_##_op; \
682 broadcast_swizzle = count; \
Italo Nicolacea032a2020-09-23 05:41:38 +0000683 ALU_CHECK_CMP(); \
Alyssa Rosenzweig195e2972019-06-19 07:23:27 -0700684 break;
Alyssa Rosenzweigeb28a362020-05-07 10:12:24 -0400685
Italo Nicolacea032a2020-09-23 05:41:38 +0000686#define ALU_CASE_CMP(nir, _op) \
Alyssa Rosenzweigeb28a362020-05-07 10:12:24 -0400687 case nir_op_##nir: \
688 op = midgard_alu_op_##_op; \
Italo Nicolacea032a2020-09-23 05:41:38 +0000689 ALU_CHECK_CMP(); \
690 break;
Alyssa Rosenzweig4df80ca2019-07-01 15:26:22 -0700691
Alyssa Rosenzweig449e5de2020-04-30 13:46:35 -0400692/* Compare mir_lower_invert */
693static bool
694nir_accepts_inot(nir_op op, unsigned src)
695{
696 switch (op) {
697 case nir_op_ior:
Alyssa Rosenzweig6b023b32020-05-08 17:42:40 -0400698 case nir_op_iand: /* TODO: b2f16 */
Alyssa Rosenzweig449e5de2020-04-30 13:46:35 -0400699 case nir_op_ixor:
700 return true;
701 case nir_op_b32csel:
702 /* Only the condition */
703 return (src == 0);
704 default:
705 return false;
706 }
707}
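
/* For example, given the table above, a NIR sequence like
 *
 *    t = inot(a)
 *    d = ior(t, b)
 *
 * can fold the inot into the ior as a source-invert modifier instead of
 * emitting a separate inor. For b32csel only the condition accepts inversion;
 * emit_alu() handles an inverted condition by swapping the two data sources,
 * since (!c) ? a : b == c ? b : a. */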
708
Alyssa Rosenzweig29afa882020-05-04 17:33:52 -0400709static bool
710mir_accept_dest_mod(compiler_context *ctx, nir_dest **dest, nir_op op)
711{
712 if (pan_has_dest_mod(dest, op)) {
713 assert((*dest)->is_ssa);
714 BITSET_SET(ctx->already_emitted, (*dest)->ssa.index);
715 return true;
716 }
717
718 return false;
719}
720
Italo Nicola83592de2020-07-15 18:48:42 +0000721/* Look for floating point mods. We have the mods fsat, fsat_signed,
722 * and fpos. We also have the relations (note 3 * 2 = 6 cases):
723 *
724 * fsat_signed(fpos(x)) = fsat(x)
725 * fsat_signed(fsat(x)) = fsat(x)
726 * fpos(fsat_signed(x)) = fsat(x)
727 * fpos(fsat(x)) = fsat(x)
728 * fsat(fsat_signed(x)) = fsat(x)
729 * fsat(fpos(x)) = fsat(x)
730 *
731 * So by cases any composition of output modifiers is equivalent to
732 * fsat alone.
733 */
734static unsigned
735mir_determine_float_outmod(compiler_context *ctx, nir_dest **dest, unsigned prior_outmod)
736{
737 bool fpos = mir_accept_dest_mod(ctx, dest, nir_op_fclamp_pos);
738 bool fsat = mir_accept_dest_mod(ctx, dest, nir_op_fsat);
739 bool ssat = mir_accept_dest_mod(ctx, dest, nir_op_fsat_signed);
740 bool prior = (prior_outmod != midgard_outmod_none);
741 int count = (int) prior + (int) fpos + (int) ssat + (int) fsat;
742
743 return ((count > 1) || fsat) ? midgard_outmod_sat :
744 fpos ? midgard_outmod_pos :
745 ssat ? midgard_outmod_sat_signed :
746 prior_outmod;
747}
748
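/* Copy NIR ALU source i into MIR source slot `to`, folding what we can into
 * modifiers: fneg/fabs become src_neg/src_abs (float ops only), inot becomes
 * src_invert where the op accepts it, and fround_even/ftrunc/ffloor/fceil on
 * a source select the instruction's rounding mode when the op supports one.
 * The swizzle is copied as-is, except that for ball-style ops components past
 * bcast_count are pinned to the last valid one. */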
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000749static void
Alyssa Rosenzweigf8b881f2020-05-25 14:19:24 -0400750mir_copy_src(midgard_instruction *ins, nir_alu_instr *instr, unsigned i, unsigned to, bool *abs, bool *neg, bool *not, enum midgard_roundmode *roundmode, bool is_int, unsigned bcast_count)
Alyssa Rosenzweig6757c482020-04-27 18:55:11 -0400751{
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -0400752 nir_alu_src src = instr->src[i];
Alyssa Rosenzweigb124f532020-04-29 18:10:43 -0400753
754 if (!is_int) {
755 if (pan_has_source_mod(&src, nir_op_fneg))
756 *neg = !(*neg);
757
758 if (pan_has_source_mod(&src, nir_op_fabs))
759 *abs = true;
760 }
761
Alyssa Rosenzweig449e5de2020-04-30 13:46:35 -0400762 if (nir_accepts_inot(instr->op, i) && pan_has_source_mod(&src, nir_op_inot))
763 *not = true;
764
Alyssa Rosenzweigf8b881f2020-05-25 14:19:24 -0400765 if (roundmode) {
766 if (pan_has_source_mod(&src, nir_op_fround_even))
767 *roundmode = MIDGARD_RTE;
768
769 if (pan_has_source_mod(&src, nir_op_ftrunc))
770 *roundmode = MIDGARD_RTZ;
771
772 if (pan_has_source_mod(&src, nir_op_ffloor))
773 *roundmode = MIDGARD_RTN;
774
775 if (pan_has_source_mod(&src, nir_op_fceil))
776 *roundmode = MIDGARD_RTP;
777 }
778
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -0400779 unsigned bits = nir_src_bit_size(src.src);
Alyssa Rosenzweig6757c482020-04-27 18:55:11 -0400780
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -0400781 ins->src[to] = nir_src_index(NULL, &src.src);
Alyssa Rosenzweig6757c482020-04-27 18:55:11 -0400782 ins->src_types[to] = nir_op_infos[instr->op].input_types[i] | bits;
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -0400783
784 for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; ++c) {
785 ins->swizzle[to][c] = src.swizzle[
786 (!bcast_count || c < bcast_count) ? c :
787 (bcast_count - 1)];
788 }
Alyssa Rosenzweig6757c482020-04-27 18:55:11 -0400789}
790
Alyssa Rosenzweigd39f95b2020-05-04 15:45:47 -0400791/* Midgard features both fcsel and icsel, depending on whether you want int or
792 * float modifiers. NIR's csel is typeless, so we want a heuristic to guess if
793 * we should emit an int or float csel depending on what modifiers could be
 794 * placed. In the absence of modifiers, this is probably arbitrary. */
795
796static bool
797mir_is_bcsel_float(nir_alu_instr *instr)
798{
799 nir_op intmods[] = {
800 nir_op_i2i8, nir_op_i2i16,
801 nir_op_i2i32, nir_op_i2i64
802 };
803
804 nir_op floatmods[] = {
805 nir_op_fabs, nir_op_fneg,
806 nir_op_f2f16, nir_op_f2f32,
807 nir_op_f2f64
808 };
809
810 nir_op floatdestmods[] = {
811 nir_op_fsat, nir_op_fsat_signed, nir_op_fclamp_pos,
812 nir_op_f2f16, nir_op_f2f32
813 };
814
815 signed score = 0;
816
817 for (unsigned i = 1; i < 3; ++i) {
818 nir_alu_src s = instr->src[i];
819 for (unsigned q = 0; q < ARRAY_SIZE(intmods); ++q) {
820 if (pan_has_source_mod(&s, intmods[q]))
821 score--;
822 }
823 }
824
825 for (unsigned i = 1; i < 3; ++i) {
826 nir_alu_src s = instr->src[i];
827 for (unsigned q = 0; q < ARRAY_SIZE(floatmods); ++q) {
828 if (pan_has_source_mod(&s, floatmods[q]))
829 score++;
830 }
831 }
832
833 for (unsigned q = 0; q < ARRAY_SIZE(floatdestmods); ++q) {
834 nir_dest *dest = &instr->dest.dest;
835 if (pan_has_dest_mod(&dest, floatdestmods[q]))
836 score++;
837 }
838
839 return (score > 0);
840}
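
/* For instance, b32csel(c, fneg(a), b) scores positively (a float modifier is
 * available to fold), so we pick fcsel and the fneg becomes a source modifier;
 * b32csel(c, i2i16(a), b) scores negatively and picks icsel. With no modifiers
 * at all the score is zero and we fall back to icsel. */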
841
Alyssa Rosenzweig6757c482020-04-27 18:55:11 -0400842static void
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000843emit_alu(compiler_context *ctx, nir_alu_instr *instr)
844{
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -0400845 nir_dest *dest = &instr->dest.dest;
846
847 if (dest->is_ssa && BITSET_TEST(ctx->already_emitted, dest->ssa.index))
848 return;
849
Alyssa Rosenzweig8f887322019-07-29 15:11:12 -0700850 /* Derivatives end up emitted on the texture pipe, not the ALUs. This
851 * is handled elsewhere */
852
853 if (instr->op == nir_op_fddx || instr->op == nir_op_fddy) {
854 midgard_emit_derivatives(ctx, instr);
855 return;
856 }
857
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -0400858 bool is_ssa = dest->is_ssa;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000859
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -0400860 unsigned nr_components = nir_dest_num_components(*dest);
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +0000861 unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
Alyssa Rosenzweig04f76ad2020-04-27 18:58:21 -0400862 unsigned op = 0;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000863
Alyssa Rosenzweig195e2972019-06-19 07:23:27 -0700864 /* Number of components valid to check for the instruction (the rest
865 * will be forced to the last), or 0 to use as-is. Relevant as
866 * ball-type instructions have a channel count in NIR but are all vec4
867 * in Midgard */
868
869 unsigned broadcast_swizzle = 0;
870
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -0400871 /* Should we swap arguments? */
872 bool flip_src12 = false;
873
Eric Anholt4c24c822020-08-25 10:15:27 -0700874 ASSERTED unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
875 ASSERTED unsigned dst_bitsize = nir_dest_bit_size(*dest);
Alyssa Rosenzweig0ed8cca2019-07-01 17:35:25 -0700876
Alyssa Rosenzweig93513cd2020-05-25 14:19:11 -0400877 enum midgard_roundmode roundmode = MIDGARD_RTE;
878
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000879 switch (instr->op) {
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +0000880 ALU_CASE(fadd, fadd);
881 ALU_CASE(fmul, fmul);
882 ALU_CASE(fmin, fmin);
883 ALU_CASE(fmax, fmax);
884 ALU_CASE(imin, imin);
885 ALU_CASE(imax, imax);
Alyssa Rosenzweig2e7555b2019-04-05 05:16:54 +0000886 ALU_CASE(umin, umin);
887 ALU_CASE(umax, umax);
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +0000888 ALU_CASE(ffloor, ffloor);
Alyssa Rosenzweigc6be9962019-02-23 01:12:10 +0000889 ALU_CASE(fround_even, froundeven);
890 ALU_CASE(ftrunc, ftrunc);
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +0000891 ALU_CASE(fceil, fceil);
892 ALU_CASE(fdot3, fdot3);
893 ALU_CASE(fdot4, fdot4);
894 ALU_CASE(iadd, iadd);
895 ALU_CASE(isub, isub);
896 ALU_CASE(imul, imul);
Alyssa Rosenzweig3e2cb212020-08-27 14:35:23 -0400897 ALU_CASE(imul_high, imul);
898 ALU_CASE(umul_high, imul);
Alyssa Rosenzweig9f14e202019-06-05 15:18:35 +0000899
900 /* Zero shoved as second-arg */
901 ALU_CASE(iabs, iabsdiff);
902
Italo Nicolac9192d12020-09-19 10:36:08 +0000903 ALU_CASE(uabs_isub, iabsdiff);
904 ALU_CASE(uabs_usub, uabsdiff);
905
Jason Ekstrandf2dc0f22019-05-06 11:45:46 -0500906 ALU_CASE(mov, imov);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000907
Italo Nicolacea032a2020-09-23 05:41:38 +0000908 ALU_CASE_CMP(feq32, feq);
909 ALU_CASE_CMP(fneu32, fne);
910 ALU_CASE_CMP(flt32, flt);
911 ALU_CASE_CMP(ieq32, ieq);
912 ALU_CASE_CMP(ine32, ine);
913 ALU_CASE_CMP(ilt32, ilt);
914 ALU_CASE_CMP(ult32, ult);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000915
Alyssa Rosenzweig3208c9d2019-03-25 01:13:12 +0000916 /* We don't have a native b2f32 instruction. Instead, like many
917 * GPUs, we exploit booleans as 0/~0 for false/true, and
918 * correspondingly AND
919 * by 1.0 to do the type conversion. For the moment, prime us
920 * to emit:
921 *
922 * iand [whatever], #0
923 *
924 * At the end of emit_alu (as MIR), we'll fix-up the constant
925 */
926
Italo Nicolacea032a2020-09-23 05:41:38 +0000927 ALU_CASE_CMP(b2f32, iand);
928 ALU_CASE_CMP(b2f16, iand);
929 ALU_CASE_CMP(b2i32, iand);
Alyssa Rosenzweig3208c9d2019-03-25 01:13:12 +0000930
Alyssa Rosenzweigae43b8f2019-03-25 00:53:46 +0000931 /* Likewise, we don't have a dedicated f2b32 instruction, but
Alyssa Rosenzweig3208c9d2019-03-25 01:13:12 +0000932 * we can do a "not equal to 0.0" test. */
Alyssa Rosenzweigae43b8f2019-03-25 00:53:46 +0000933
Italo Nicolacea032a2020-09-23 05:41:38 +0000934 ALU_CASE_CMP(f2b32, fne);
935 ALU_CASE_CMP(i2b32, ine);
Alyssa Rosenzweigae43b8f2019-03-25 00:53:46 +0000936
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +0000937 ALU_CASE(frcp, frcp);
938 ALU_CASE(frsq, frsqrt);
939 ALU_CASE(fsqrt, fsqrt);
940 ALU_CASE(fexp2, fexp2);
941 ALU_CASE(flog2, flog2);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000942
Alyssa Rosenzweig93513cd2020-05-25 14:19:11 -0400943 ALU_CASE_RTZ(f2i64, f2i_rte);
944 ALU_CASE_RTZ(f2u64, f2u_rte);
945 ALU_CASE_RTZ(i2f64, i2f_rte);
946 ALU_CASE_RTZ(u2f64, u2f_rte);
Boris Brezillonfcceeaf2020-01-20 22:05:14 +0100947
Alyssa Rosenzweig93513cd2020-05-25 14:19:11 -0400948 ALU_CASE_RTZ(f2i32, f2i_rte);
949 ALU_CASE_RTZ(f2u32, f2u_rte);
950 ALU_CASE_RTZ(i2f32, i2f_rte);
951 ALU_CASE_RTZ(u2f32, u2f_rte);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000952
Alyssa Rosenzweig0ae01412020-05-25 14:46:40 -0400953 ALU_CASE_RTZ(f2i8, f2i_rte);
954 ALU_CASE_RTZ(f2u8, f2u_rte);
955
Alyssa Rosenzweig93513cd2020-05-25 14:19:11 -0400956 ALU_CASE_RTZ(f2i16, f2i_rte);
957 ALU_CASE_RTZ(f2u16, f2u_rte);
958 ALU_CASE_RTZ(i2f16, i2f_rte);
959 ALU_CASE_RTZ(u2f16, u2f_rte);
Alyssa Rosenzweigd8c084d2019-07-01 17:41:20 -0700960
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +0000961 ALU_CASE(fsin, fsin);
962 ALU_CASE(fcos, fcos);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000963
Alyssa Rosenzweig449e5de2020-04-30 13:46:35 -0400964 /* We'll get 0 in the second arg, so:
965 * ~a = ~(a | 0) = nor(a, 0) */
966 ALU_CASE(inot, inor);
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +0000967 ALU_CASE(iand, iand);
968 ALU_CASE(ior, ior);
969 ALU_CASE(ixor, ixor);
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +0000970 ALU_CASE(ishl, ishl);
971 ALU_CASE(ishr, iasr);
972 ALU_CASE(ushr, ilsr);
973
Alyssa Rosenzweig195e2972019-06-19 07:23:27 -0700974 ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2);
975 ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3);
Italo Nicolacea032a2020-09-23 05:41:38 +0000976 ALU_CASE_CMP(b32all_fequal4, fball_eq);
Alyssa Rosenzweig53664102019-03-25 00:12:06 +0000977
Alyssa Rosenzweig195e2972019-06-19 07:23:27 -0700978 ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2);
979 ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3);
Italo Nicolacea032a2020-09-23 05:41:38 +0000980 ALU_CASE_CMP(b32any_fnequal4, fbany_neq);
Alyssa Rosenzweig53664102019-03-25 00:12:06 +0000981
Alyssa Rosenzweig195e2972019-06-19 07:23:27 -0700982 ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2);
983 ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3);
Italo Nicolacea032a2020-09-23 05:41:38 +0000984 ALU_CASE_CMP(b32all_iequal4, iball_eq);
Alyssa Rosenzweig53664102019-03-25 00:12:06 +0000985
Alyssa Rosenzweig195e2972019-06-19 07:23:27 -0700986 ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2);
987 ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3);
Italo Nicolacea032a2020-09-23 05:41:38 +0000988 ALU_CASE_CMP(b32any_inequal4, ibany_neq);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +0000989
Alyssa Rosenzweig659aa3d2019-05-26 03:16:37 +0000990 /* Source mods will be shoved in later */
991 ALU_CASE(fabs, fmov);
992 ALU_CASE(fneg, fmov);
993 ALU_CASE(fsat, fmov);
Alyssa Rosenzweig24e2e242020-05-04 16:12:41 -0400994 ALU_CASE(fsat_signed, fmov);
995 ALU_CASE(fclamp_pos, fmov);
Alyssa Rosenzweig659aa3d2019-05-26 03:16:37 +0000996
Alyssa Rosenzweig4df80ca2019-07-01 15:26:22 -0700997 /* For size conversion, we use a move. Ideally though we would squash
998 * these ops together; maybe that has to happen after in NIR as part of
999 * propagation...? An earlier algebraic pass ensured we step down by
Alyssa Rosenzweig7f807ef2019-07-01 16:44:00 -07001000 * only / exactly one size. If stepping down, we use a dest override to
1001 * reduce the size; if stepping up, we use a larger-sized move with a
1002 * half source and a sign/zero-extension modifier */
Alyssa Rosenzweig4df80ca2019-07-01 15:26:22 -07001003
Alyssa Rosenzweig7f807ef2019-07-01 16:44:00 -07001004 case nir_op_i2i8:
1005 case nir_op_i2i16:
1006 case nir_op_i2i32:
Alyssa Rosenzweig2655a302019-11-04 22:21:20 -05001007 case nir_op_i2i64:
Alyssa Rosenzweig4df80ca2019-07-01 15:26:22 -07001008 case nir_op_u2u8:
1009 case nir_op_u2u16:
Alyssa Rosenzweig2655a302019-11-04 22:21:20 -05001010 case nir_op_u2u32:
Boris Brezillonf53a0792020-01-20 16:03:52 +01001011 case nir_op_u2u64:
1012 case nir_op_f2f16:
Boris Brezillone1f9e8d2020-01-20 16:05:31 +01001013 case nir_op_f2f32:
1014 case nir_op_f2f64: {
1015 if (instr->op == nir_op_f2f16 || instr->op == nir_op_f2f32 ||
1016 instr->op == nir_op_f2f64)
Boris Brezillonf53a0792020-01-20 16:03:52 +01001017 op = midgard_alu_op_fmov;
1018 else
1019 op = midgard_alu_op_imov;
Alyssa Rosenzweig7f807ef2019-07-01 16:44:00 -07001020
Alyssa Rosenzweig4df80ca2019-07-01 15:26:22 -07001021 break;
1022 }
1023
Alyssa Rosenzweig7b78af82019-03-26 04:01:33 +00001024 /* For greater-or-equal, we lower to less-or-equal and flip the
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001025 * arguments */
1026
Alyssa Rosenzweig7b78af82019-03-26 04:01:33 +00001027 case nir_op_fge:
1028 case nir_op_fge32:
1029 case nir_op_ige32:
1030 case nir_op_uge32: {
1031 op =
1032 instr->op == nir_op_fge ? midgard_alu_op_fle :
1033 instr->op == nir_op_fge32 ? midgard_alu_op_fle :
1034 instr->op == nir_op_ige32 ? midgard_alu_op_ile :
1035 instr->op == nir_op_uge32 ? midgard_alu_op_ule :
1036 0;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001037
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001038 flip_src12 = true;
Italo Nicolacea032a2020-09-23 05:41:38 +00001039 ALU_CHECK_CMP();
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001040 break;
1041 }
1042
Alyssa Rosenzweig3fb88422019-03-25 00:25:01 +00001043 case nir_op_b32csel: {
Alyssa Rosenzweig726f0262019-05-07 02:52:08 +00001044 bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
Alyssa Rosenzweigd39f95b2020-05-04 15:45:47 -04001045 bool is_float = mir_is_bcsel_float(instr);
1046 op = is_float ?
1047 (mixed ? midgard_alu_op_fcsel_v : midgard_alu_op_fcsel) :
1048 (mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001049
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001050 break;
1051 }
1052
Alyssa Rosenzweig551d9902020-05-13 16:17:46 -04001053 case nir_op_unpack_32_2x16:
1054 case nir_op_unpack_32_4x8:
1055 case nir_op_pack_32_2x16:
1056 case nir_op_pack_32_4x8: {
1057 op = midgard_alu_op_imov;
1058 break;
1059 }
1060
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001061 default:
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +01001062 DBG("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001063 assert(0);
1064 return;
1065 }
1066
Alyssa Rosenzweig72c1e3a2020-05-21 12:31:40 -04001067 /* Promote imov to fmov if it might help inline a constant */
1068 if (op == midgard_alu_op_imov && nir_src_is_const(instr->src[0].src)
1069 && nir_src_bit_size(instr->src[0].src) == 32
1070 && nir_is_same_comp_swizzle(instr->src[0].swizzle,
1071 nir_src_num_components(instr->src[0].src))) {
1072 op = midgard_alu_op_fmov;
1073 }
1074
Alyssa Rosenzweig0a13bab2019-05-15 01:16:51 +00001075 /* Midgard can perform certain modifiers on output of an ALU op */
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001076
1077 unsigned outmod = 0;
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001078 bool is_int = midgard_is_integer_op(op);
Alyssa Rosenzweig7bc91b42019-04-24 23:42:30 +00001079
Alyssa Rosenzweig3e2cb212020-08-27 14:35:23 -04001080 if (instr->op == nir_op_umul_high || instr->op == nir_op_imul_high) {
1081 outmod = midgard_outmod_int_high;
1082 } else if (midgard_is_integer_out_op(op)) {
Alyssa Rosenzweig67804812019-06-05 15:17:45 -07001083 outmod = midgard_outmod_int_wrap;
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001084 } else if (instr->op == nir_op_fsat) {
1085 outmod = midgard_outmod_sat;
1086 } else if (instr->op == nir_op_fsat_signed) {
1087 outmod = midgard_outmod_sat_signed;
1088 } else if (instr->op == nir_op_fclamp_pos) {
1089 outmod = midgard_outmod_pos;
Alyssa Rosenzweig67804812019-06-05 15:17:45 -07001090 }
Alyssa Rosenzweig659aa3d2019-05-26 03:16:37 +00001091
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +00001092 /* Fetch unit, quirks, etc information */
Alyssa Rosenzweig1f345bc2019-04-24 01:15:15 +00001093 unsigned opcode_props = alu_opcode_props[op].props;
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +00001094 bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001095
Italo Nicola20969032020-07-13 16:19:08 +00001096 if (!midgard_is_integer_out_op(op)) {
Italo Nicola83592de2020-07-15 18:48:42 +00001097 outmod = mir_determine_float_outmod(ctx, &dest, outmod);
Alyssa Rosenzweig29afa882020-05-04 17:33:52 -04001098 }
1099
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001100 midgard_instruction ins = {
1101 .type = TAG_ALU_4,
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001102 .dest = nir_dest_index(dest),
Alyssa Rosenzweigecf94662020-04-27 18:57:34 -04001103 .dest_type = nir_op_infos[instr->op].output_type
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001104 | nir_dest_bit_size(*dest),
Alyssa Rosenzweig93513cd2020-05-25 14:19:11 -04001105 .roundmode = roundmode,
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001106 };
1107
Alyssa Rosenzweigf8b881f2020-05-25 14:19:24 -04001108 enum midgard_roundmode *roundptr = (opcode_props & MIDGARD_ROUNDS) ?
1109 &ins.roundmode : NULL;
1110
Alyssa Rosenzweig6757c482020-04-27 18:55:11 -04001111 for (unsigned i = nr_inputs; i < ARRAY_SIZE(ins.src); ++i)
1112 ins.src[i] = ~0;
1113
1114 if (quirk_flipped_r24) {
1115 ins.src[0] = ~0;
Alyssa Rosenzweigf8b881f2020-05-25 14:19:24 -04001116 mir_copy_src(&ins, instr, 0, 1, &ins.src_abs[1], &ins.src_neg[1], &ins.src_invert[1], roundptr, is_int, broadcast_swizzle);
Alyssa Rosenzweig6757c482020-04-27 18:55:11 -04001117 } else {
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001118 for (unsigned i = 0; i < nr_inputs; ++i) {
1119 unsigned to = i;
1120
1121 if (instr->op == nir_op_b32csel) {
1122 /* The condition is the first argument; move
1123 * the other arguments up one to be a binary
1124 * instruction for Midgard with the condition
1125 * last */
1126
1127 if (i == 0)
1128 to = 2;
Alyssa Rosenzweig449e5de2020-04-30 13:46:35 -04001129 else if (flip_src12)
1130 to = 2 - i;
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001131 else
1132 to = i - 1;
1133 } else if (flip_src12) {
1134 to = 1 - to;
1135 }
1136
Alyssa Rosenzweigf8b881f2020-05-25 14:19:24 -04001137 mir_copy_src(&ins, instr, i, to, &ins.src_abs[to], &ins.src_neg[to], &ins.src_invert[to], roundptr, is_int, broadcast_swizzle);
Alyssa Rosenzweig449e5de2020-04-30 13:46:35 -04001138
1139 /* (!c) ? a : b = c ? b : a */
1140 if (instr->op == nir_op_b32csel && ins.src_invert[2]) {
1141 ins.src_invert[2] = false;
1142 flip_src12 ^= true;
1143 }
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001144 }
Alyssa Rosenzweig6757c482020-04-27 18:55:11 -04001145 }
1146
Alyssa Rosenzweig659aa3d2019-05-26 03:16:37 +00001147 if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001148 /* Lowered to move */
Alyssa Rosenzweig659aa3d2019-05-26 03:16:37 +00001149 if (instr->op == nir_op_fneg)
Alyssa Rosenzweig1cd65352020-05-21 12:38:27 -04001150 ins.src_neg[1] ^= true;
Alyssa Rosenzweig659aa3d2019-05-26 03:16:37 +00001151
1152 if (instr->op == nir_op_fabs)
Alyssa Rosenzweig1cd65352020-05-21 12:38:27 -04001153 ins.src_abs[1] = true;
Alyssa Rosenzweig659aa3d2019-05-26 03:16:37 +00001154 }
1155
Alyssa Rosenzweigf8b18a42019-07-01 18:51:48 -07001156 ins.mask = mask_of(nr_components);
1157
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001158 /* Apply writemask if non-SSA, keeping in mind that we can't write to
1159 * components that don't exist. Note modifier => SSA => !reg => no
1160 * writemask, so we don't have to worry about writemasks here.*/
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001161
1162 if (!is_ssa)
Alyssa Rosenzweigf8b18a42019-07-01 18:51:48 -07001163 ins.mask &= instr->dest.write_mask;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001164
Italo Nicolaf4c89bf2020-07-09 12:02:57 +00001165 ins.op = op;
Italo Nicola50113732020-07-15 18:43:18 +00001166 ins.outmod = outmod;
Italo Nicolaf4c89bf2020-07-09 12:02:57 +00001167
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001168 /* Late fixup for emulated instructions */
1169
Alyssa Rosenzweig3208c9d2019-03-25 01:13:12 +00001170 if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001171 /* Presently, our second argument is an inline #0 constant.
1172 * Switch over to an embedded 1.0 constant (that can't fit
1173 * inline, since we're 32-bit, not 16-bit like the inline
1174 * constants) */
1175
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07001176 ins.has_inline_constant = false;
1177 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
Alyssa Rosenzweig074815c2020-04-29 16:29:01 -04001178 ins.src_types[1] = nir_type_float32;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001179 ins.has_constants = true;
Alyssa Rosenzweig9da46032019-03-24 16:07:31 +00001180
Boris Brezillon15c92d12020-01-20 15:00:57 +01001181 if (instr->op == nir_op_b2f32)
1182 ins.constants.f32[0] = 1.0f;
1183 else
1184 ins.constants.i32[0] = 1;
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04001185
1186 for (unsigned c = 0; c < 16; ++c)
1187 ins.swizzle[1][c] = 0;
Alyssa Rosenzweig6b023b32020-05-08 17:42:40 -04001188 } else if (instr->op == nir_op_b2f16) {
1189 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1190 ins.src_types[1] = nir_type_float16;
1191 ins.has_constants = true;
1192 ins.constants.i16[0] = _mesa_float_to_half(1.0);
1193
1194 for (unsigned c = 0; c < 16; ++c)
1195 ins.swizzle[1][c] = 0;
Alyssa Rosenzweig88c59792019-06-05 15:24:51 +00001196 } else if (nr_inputs == 1 && !quirk_flipped_r24) {
1197 /* Lots of instructions need a 0 plonked in */
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07001198 ins.has_inline_constant = false;
1199 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
Italo Nicolab1b0ce02020-07-10 14:51:52 +00001200 ins.src_types[1] = ins.src_types[0];
Alyssa Rosenzweig3208c9d2019-03-25 01:13:12 +00001201 ins.has_constants = true;
Boris Brezillon15c92d12020-01-20 15:00:57 +01001202 ins.constants.u32[0] = 0;
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04001203
1204 for (unsigned c = 0; c < 16; ++c)
1205 ins.swizzle[1][c] = 0;
Alyssa Rosenzweig551d9902020-05-13 16:17:46 -04001206 } else if (instr->op == nir_op_pack_32_2x16) {
1207 ins.dest_type = nir_type_uint16;
1208 ins.mask = mask_of(nr_components * 2);
Alyssa Rosenzweige9c780b2020-05-13 18:41:52 -04001209 ins.is_pack = true;
Alyssa Rosenzweig551d9902020-05-13 16:17:46 -04001210 } else if (instr->op == nir_op_pack_32_4x8) {
1211 ins.dest_type = nir_type_uint8;
1212 ins.mask = mask_of(nr_components * 4);
Alyssa Rosenzweige9c780b2020-05-13 18:41:52 -04001213 ins.is_pack = true;
Alyssa Rosenzweig551d9902020-05-13 16:17:46 -04001214 } else if (instr->op == nir_op_unpack_32_2x16) {
1215 ins.dest_type = nir_type_uint32;
1216 ins.mask = mask_of(nr_components >> 1);
Alyssa Rosenzweige9c780b2020-05-13 18:41:52 -04001217 ins.is_pack = true;
Alyssa Rosenzweig551d9902020-05-13 16:17:46 -04001218 } else if (instr->op == nir_op_unpack_32_4x8) {
1219 ins.dest_type = nir_type_uint32;
1220 ins.mask = mask_of(nr_components >> 2);
Alyssa Rosenzweige9c780b2020-05-13 18:41:52 -04001221 ins.is_pack = true;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001222 }
1223
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +00001224 if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
1225 /* To avoid duplicating the lookup tables (probably), true LUT
1226 * instructions can only operate as if they were scalars. Lower
1227 * them here by changing the component. */
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001228
Alyssa Rosenzweigf8b18a42019-07-01 18:51:48 -07001229 unsigned orig_mask = ins.mask;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001230
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001231 unsigned swizzle_back[MIR_VEC_COMPONENTS];
1232 memcpy(&swizzle_back, ins.swizzle[0], sizeof(swizzle_back));
1233
Icecream95a6f0d7f2020-05-24 00:23:25 +12001234 midgard_instruction ins_split[MIR_VEC_COMPONENTS];
1235 unsigned ins_count = 0;
1236
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001237 for (int i = 0; i < nr_components; ++i) {
Alyssa Rosenzweig2c9e1242019-06-17 11:49:44 -07001238 /* Mask the associated component, dropping the
1239 * instruction if needed */
1240
Alyssa Rosenzweigf8b18a42019-07-01 18:51:48 -07001241 ins.mask = 1 << i;
1242 ins.mask &= orig_mask;
Alyssa Rosenzweig2c9e1242019-06-17 11:49:44 -07001243
Icecream95a6f0d7f2020-05-24 00:23:25 +12001244 for (unsigned j = 0; j < ins_count; ++j) {
1245 if (swizzle_back[i] == ins_split[j].swizzle[0][0]) {
1246 ins_split[j].mask |= ins.mask;
1247 ins.mask = 0;
1248 break;
1249 }
1250 }
1251
Alyssa Rosenzweigf8b18a42019-07-01 18:51:48 -07001252 if (!ins.mask)
Alyssa Rosenzweig2c9e1242019-06-17 11:49:44 -07001253 continue;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001254
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04001255 for (unsigned j = 0; j < MIR_VEC_COMPONENTS; ++j)
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04001256 ins.swizzle[0][j] = swizzle_back[i]; /* Pull from the correct component */
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001257
Icecream95a6f0d7f2020-05-24 00:23:25 +12001258 ins_split[ins_count] = ins;
1259
1260 ++ins_count;
1261 }
1262
1263 for (unsigned i = 0; i < ins_count; ++i) {
1264 emit_mir_instruction(ctx, ins_split[i]);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001265 }
1266 } else {
1267 emit_mir_instruction(ctx, ins);
1268 }
1269}
1270
Alyssa Rosenzweig97dcad82019-02-07 03:39:25 +00001271#undef ALU_CASE
1272
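/* Derive the write mask for a load/store intrinsic: reads take the mask from
 * the destination's component count and bit size, writes take the NIR write
 * mask (treated as 32-bit). The result is normalized to a byte mask via
 * pan_to_bytemask() so differing component sizes agree. */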
Alyssa Rosenzweig1798f6b2019-11-15 15:16:53 -05001273static void
1274mir_set_intr_mask(nir_instr *instr, midgard_instruction *ins, bool is_read)
Alyssa Rosenzweig65e6cb42019-08-13 09:11:48 -07001275{
1276 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
Alyssa Rosenzweig1798f6b2019-11-15 15:16:53 -05001277 unsigned nir_mask = 0;
1278 unsigned dsize = 0;
Alyssa Rosenzweig65e6cb42019-08-13 09:11:48 -07001279
Alyssa Rosenzweig1798f6b2019-11-15 15:16:53 -05001280 if (is_read) {
1281 nir_mask = mask_of(nir_intrinsic_dest_components(intr));
1282 dsize = nir_dest_bit_size(intr->dest);
1283 } else {
1284 nir_mask = nir_intrinsic_write_mask(intr);
1285 dsize = 32;
1286 }
1287
1288 /* Once we have the NIR mask, we need to normalize to work in 32-bit space */
Alyssa Rosenzweig9b8cb9f2020-03-09 20:19:29 -04001289 unsigned bytemask = pan_to_bytemask(dsize, nir_mask);
Alyssa Rosenzweigb91d7152020-05-11 15:06:53 -04001290 ins->dest_type = nir_type_uint | dsize;
Italo Nicola11012612020-08-26 14:56:13 +00001291 mir_set_bytemask(ins, bytemask);
Alyssa Rosenzweig65e6cb42019-08-13 09:11:48 -07001292}
1293
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001294/* Uniforms and UBOs use a shared code path, as uniforms are just (slightly
1295 * optimized) versions of UBO #0 */
1296
Alyssa Rosenzweig59d30fd2020-01-10 17:47:57 -05001297static midgard_instruction *
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001298emit_ubo_read(
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07001299 compiler_context *ctx,
Alyssa Rosenzweig65e6cb42019-08-13 09:11:48 -07001300 nir_instr *instr,
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07001301 unsigned dest,
1302 unsigned offset,
1303 nir_src *indirect_offset,
Alyssa Rosenzweig59d30fd2020-01-10 17:47:57 -05001304 unsigned indirect_shift,
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07001305 unsigned index)
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00001306{
1307 /* TODO: half-floats */
1308
Alyssa Rosenzweigbc9a7d02019-11-15 14:19:34 -05001309 midgard_instruction ins = m_ld_ubo_int4(dest, 0);
Boris Brezillon15c92d12020-01-20 15:00:57 +01001310 ins.constants.u32[0] = offset;
Alyssa Rosenzweigda736512019-12-19 11:12:25 -05001311
1312 if (instr->type == nir_instr_type_intrinsic)
1313 mir_set_intr_mask(instr, &ins, true);
Alyssa Rosenzweig3174bc92019-07-16 14:10:08 -07001314
1315 if (indirect_offset) {
Alyssa Rosenzweige7fd14c2019-10-26 15:50:38 -04001316 ins.src[2] = nir_src_index(ctx, indirect_offset);
Alyssa Rosenzweig074815c2020-04-29 16:29:01 -04001317 ins.src_types[2] = nir_type_uint32;
Alyssa Rosenzweig59d30fd2020-01-10 17:47:57 -05001318 ins.load_store.arg_2 = (indirect_shift << 5);
Alyssa Rosenzweig797fa872020-07-06 10:57:04 -04001319
1320                /* Use the X component for the whole swizzle to keep register
1321                 * pressure from ballooning due to the extra components */
1322 for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[2]); ++i)
1323 ins.swizzle[2][i] = 0;
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00001324 } else {
Alyssa Rosenzweigc9087722019-08-01 13:29:01 -07001325 ins.load_store.arg_2 = 0x1E;
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00001326 }
Alyssa Rosenzweig3174bc92019-07-16 14:10:08 -07001327
Alyssa Rosenzweigc9087722019-08-01 13:29:01 -07001328 ins.load_store.arg_1 = index;
1329
Alyssa Rosenzweige7ac46b2019-08-02 17:09:54 -07001330 return emit_mir_instruction(ctx, ins);
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00001331}
1332
Alyssa Rosenzweig3a310fb2020-02-05 15:17:44 -05001333/* Globals are like UBOs if you squint. And shared memory is like globals if
1334 * you squint even harder */
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07001335
1336static void
Alyssa Rosenzweig31489372019-11-05 08:59:49 -05001337emit_global(
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07001338 compiler_context *ctx,
1339 nir_instr *instr,
1340 bool is_read,
1341 unsigned srcdest,
Alyssa Rosenzweig0bb25e42020-02-27 09:41:17 -05001342 nir_src *offset,
Alyssa Rosenzweig3a310fb2020-02-05 15:17:44 -05001343 bool is_shared)
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07001344{
1345 /* TODO: types */
1346
Dylan Baker8e369612018-09-14 12:57:32 -07001347 midgard_instruction ins;
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07001348
1349 if (is_read)
Alyssa Rosenzweig31489372019-11-05 08:59:49 -05001350 ins = m_ld_int4(srcdest, 0);
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07001351 else
Alyssa Rosenzweig31489372019-11-05 08:59:49 -05001352 ins = m_st_int4(srcdest, 0);
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07001353
Alyssa Rosenzweig0bb25e42020-02-27 09:41:17 -05001354 mir_set_offset(ctx, &ins, offset, is_shared);
Alyssa Rosenzweig1798f6b2019-11-15 15:16:53 -05001355 mir_set_intr_mask(instr, &ins, is_read);
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07001356
Alyssa Rosenzweig41184f82020-08-27 15:13:19 -04001357 /* Set a valid swizzle for masked out components */
1358 assert(ins.mask);
1359 unsigned first_component = __builtin_ffs(ins.mask) - 1;
1360
1361 for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[0]); ++i) {
1362 if (!(ins.mask & (1 << i)))
1363 ins.swizzle[0][i] = first_component;
1364 }
1365
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07001366 emit_mir_instruction(ctx, ins);
1367}
1368
Italo Nicola8e221f52020-08-31 11:17:48 +00001369/* If is_shared is off, the only other possibility is globals, since
1370 * SSBOs are lowered to globals through a NIR pass. */
1371static void
1372emit_atomic(
1373 compiler_context *ctx,
1374 nir_intrinsic_instr *instr,
1375 bool is_shared,
1376 midgard_load_store_op op)
1377{
1378 unsigned bitsize = nir_src_bit_size(instr->src[1]);
1379 nir_alu_type type =
1380 (op == midgard_op_atomic_imin || op == midgard_op_atomic_imax) ?
1381 nir_type_int : nir_type_uint;
1382
1383 unsigned dest = nir_dest_index(&instr->dest);
1384 unsigned val = nir_src_index(ctx, &instr->src[1]);
1385 emit_explicit_constant(ctx, val, val);
1386
1387 midgard_instruction ins = {
1388 .type = TAG_LOAD_STORE_4,
1389 .mask = 0xF,
1390 .dest = dest,
1391 .src = { ~0, ~0, ~0, val },
1392 .src_types = { 0, 0, 0, type | bitsize },
1393 .op = op
1394 };
1395
1396 nir_src *src_offset = nir_get_io_offset_src(instr);
1397
1398 /* cmpxchg takes an extra value in arg_2, so we don't use it for the offset */
1399 if (op == midgard_op_atomic_cmpxchg) {
1400 unsigned addr = nir_src_index(ctx, src_offset);
1401
1402 ins.src[1] = addr;
1403 ins.src_types[1] = nir_type_uint | nir_src_bit_size(*src_offset);
1404
1405 unsigned xchg_val = nir_src_index(ctx, &instr->src[2]);
1406 emit_explicit_constant(ctx, xchg_val, xchg_val);
1407
1408 ins.src[2] = val;
1409 ins.src_types[2] = type | bitsize;
1410 ins.src[3] = xchg_val;
1411
1412 if (is_shared)
1413 ins.load_store.arg_1 |= 0x6E;
1414 } else {
1415 mir_set_offset(ctx, &ins, src_offset, is_shared);
1416 }
1417
1418 mir_set_intr_mask(&instr->instr, &ins, true);
1419
1420 emit_mir_instruction(ctx, ins);
1421}
1422
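/* Emits a varying read, selecting the load op by type, applying the component
 * offset through the swizzle, and falling back to a register-indirect address
 * when the offset is not constant */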
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00001423static void
Alyssa Rosenzweig15fae1e2019-06-04 23:26:09 +00001424emit_varying_read(
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07001425 compiler_context *ctx,
1426 unsigned dest, unsigned offset,
1427 unsigned nr_comp, unsigned component,
Alyssa Rosenzweigc17a4412019-12-27 15:32:50 -05001428 nir_src *indirect_offset, nir_alu_type type, bool flat)
Alyssa Rosenzweig15fae1e2019-06-04 23:26:09 +00001429{
1430 /* XXX: Half-floats? */
1431 /* TODO: swizzle, mask */
1432
1433 midgard_instruction ins = m_ld_vary_32(dest, offset);
Alyssa Rosenzweigf8b18a42019-07-01 18:51:48 -07001434 ins.mask = mask_of(nr_comp);
Alyssa Rosenzweig2d168832020-06-04 11:32:59 -04001435 ins.dest_type = type;
1436
1437 if (type == nir_type_float16) {
1438 /* Ensure we are aligned so we can pack it later */
1439 ins.mask = mask_of(ALIGN_POT(nr_comp, 2));
1440 }
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04001441
1442 for (unsigned i = 0; i < ARRAY_SIZE(ins.swizzle[0]); ++i)
1443 ins.swizzle[0][i] = MIN2(i + component, COMPONENT_W);
Alyssa Rosenzweig15fae1e2019-06-04 23:26:09 +00001444
1445 midgard_varying_parameter p = {
1446 .is_varying = 1,
1447 .interpolation = midgard_interp_default,
Alyssa Rosenzweigc17a4412019-12-27 15:32:50 -05001448 .flat = flat,
Alyssa Rosenzweig15fae1e2019-06-04 23:26:09 +00001449 };
1450
1451 unsigned u;
1452 memcpy(&u, &p, sizeof(p));
1453 ins.load_store.varying_parameters = u;
1454
Alyssa Rosenzweig074815c2020-04-29 16:29:01 -04001455 if (indirect_offset) {
Alyssa Rosenzweige7fd14c2019-10-26 15:50:38 -04001456 ins.src[2] = nir_src_index(ctx, indirect_offset);
Alyssa Rosenzweig074815c2020-04-29 16:29:01 -04001457 ins.src_types[2] = nir_type_uint32;
1458 } else
Alyssa Rosenzweigc9087722019-08-01 13:29:01 -07001459 ins.load_store.arg_2 = 0x1E;
Alyssa Rosenzweig15fae1e2019-06-04 23:26:09 +00001460
Alyssa Rosenzweigc9087722019-08-01 13:29:01 -07001461 ins.load_store.arg_1 = 0x9E;
1462
Alyssa Rosenzweig9b97ed12019-06-28 09:30:59 -07001463 /* Use the type appropriate load */
1464 switch (type) {
Alyssa Rosenzweig5f8dd412020-05-22 16:22:48 -04001465 case nir_type_uint32:
1466 case nir_type_bool32:
Italo Nicolabea6a652020-07-23 19:24:39 +00001467 ins.op = midgard_op_ld_vary_32u;
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07001468 break;
Alyssa Rosenzweig5f8dd412020-05-22 16:22:48 -04001469 case nir_type_int32:
Italo Nicolabea6a652020-07-23 19:24:39 +00001470 ins.op = midgard_op_ld_vary_32i;
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07001471 break;
Alyssa Rosenzweig5f8dd412020-05-22 16:22:48 -04001472 case nir_type_float32:
Italo Nicolabea6a652020-07-23 19:24:39 +00001473 ins.op = midgard_op_ld_vary_32;
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07001474 break;
Alyssa Rosenzweig5f8dd412020-05-22 16:22:48 -04001475 case nir_type_float16:
Italo Nicolabea6a652020-07-23 19:24:39 +00001476 ins.op = midgard_op_ld_vary_16;
Alyssa Rosenzweig5f8dd412020-05-22 16:22:48 -04001477 break;
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07001478 default:
1479 unreachable("Attempted to load unknown type");
1480 break;
Alyssa Rosenzweig9b97ed12019-06-28 09:30:59 -07001481 }
1482
Alyssa Rosenzweig15fae1e2019-06-04 23:26:09 +00001483 emit_mir_instruction(ctx, ins);
1484}
1485
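/* Reads a vertex attribute, picking the ld_attr variant that matches the
 * base type of the destination */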
Alyssa Rosenzweig6e688902019-12-19 13:24:17 -05001486static void
1487emit_attr_read(
1488 compiler_context *ctx,
1489 unsigned dest, unsigned offset,
1490 unsigned nr_comp, nir_alu_type t)
1491{
1492 midgard_instruction ins = m_ld_attr_32(dest, offset);
1493 ins.load_store.arg_1 = 0x1E;
1494 ins.load_store.arg_2 = 0x1E;
1495 ins.mask = mask_of(nr_comp);
1496
1497 /* Use the type appropriate load */
1498 switch (t) {
1499 case nir_type_uint:
1500 case nir_type_bool:
Italo Nicolabea6a652020-07-23 19:24:39 +00001501 ins.op = midgard_op_ld_attr_32u;
Alyssa Rosenzweig6e688902019-12-19 13:24:17 -05001502 break;
1503 case nir_type_int:
Italo Nicolabea6a652020-07-23 19:24:39 +00001504 ins.op = midgard_op_ld_attr_32i;
Alyssa Rosenzweig6e688902019-12-19 13:24:17 -05001505 break;
1506 case nir_type_float:
Italo Nicolabea6a652020-07-23 19:24:39 +00001507 ins.op = midgard_op_ld_attr_32;
Alyssa Rosenzweig6e688902019-12-19 13:24:17 -05001508 break;
1509 default:
1510 unreachable("Attempted to load unknown type");
1511 break;
1512 }
1513
1514 emit_mir_instruction(ctx, ins);
1515}
1516
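/* Sysvals are read like regular uniforms: look up the prefix-uniform slot
 * assigned to this sysval and emit a UBO #0 read from it */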
Alyssa Rosenzweigfcbb3d42020-02-04 09:46:17 -05001517static void
Alyssa Rosenzweigb756a662020-03-10 16:19:33 -04001518emit_sysval_read(compiler_context *ctx, nir_instr *instr,
Alyssa Rosenzweigfcbb3d42020-02-04 09:46:17 -05001519 unsigned nr_components, unsigned offset)
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00001520{
Alyssa Rosenzweig674b24d2020-03-10 15:54:17 -04001521 nir_dest nir_dest;
Alyssa Rosenzweig6d8490f2019-07-11 15:34:56 -07001522
Boris Brezillonbd49c8f2019-06-14 09:59:20 +02001523 /* Figure out which uniform this is */
Alyssa Rosenzweige6102672020-03-10 16:06:30 -04001524 int sysval = panfrost_sysval_for_instr(instr, &nir_dest);
Alyssa Rosenzweigc2ff3bb2020-03-10 16:00:56 -04001525 void *val = _mesa_hash_table_u64_search(ctx->sysvals.sysval_to_id, sysval);
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00001526
Alyssa Rosenzweig674b24d2020-03-10 15:54:17 -04001527 unsigned dest = nir_dest_index(&nir_dest);
1528
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00001529 /* Sysvals are prefix uniforms */
1530 unsigned uniform = ((uintptr_t) val) - 1;
1531
Alyssa Rosenzweig6a466c02019-04-20 23:52:42 +00001532 /* Emit the read itself -- this is never indirect */
Alyssa Rosenzweig63e240d2019-08-02 17:10:18 -07001533 midgard_instruction *ins =
Alyssa Rosenzweigfcbb3d42020-02-04 09:46:17 -05001534 emit_ubo_read(ctx, instr, dest, (uniform * 16) + offset, NULL, 0, 0);
Alyssa Rosenzweig63e240d2019-08-02 17:10:18 -07001535
1536 ins->mask = mask_of(nr_components);
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00001537}
1538
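/* Magic load/store arguments used by ld_compute_id to select which compute
 * builtin (work group ID vs. local invocation ID) is read */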
Alyssa Rosenzweig7229af72019-08-06 13:47:17 -07001539static unsigned
1540compute_builtin_arg(nir_op op)
1541{
1542 switch (op) {
1543 case nir_intrinsic_load_work_group_id:
1544 return 0x14;
1545 case nir_intrinsic_load_local_invocation_id:
1546 return 0x10;
1547 default:
1548                unreachable("Invalid compute parameter loaded");
1549 }
1550}
1551
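/* Emits the writeout branch for a render target: colour comes in src[0],
 * with optional depth (src[2]) and stencil (src[3]) sources; passing ~0 for
 * a source skips that part of the writeout */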
Alyssa Rosenzweigdff49862019-08-12 12:36:46 -07001552static void
Icecream95a6806342020-06-06 15:41:51 +12001553emit_fragment_store(compiler_context *ctx, unsigned src, unsigned src_z, unsigned src_s, enum midgard_rt_id rt)
Alyssa Rosenzweigdff49862019-08-12 12:36:46 -07001554{
Boris Brezillone1ba0cd2020-01-31 10:05:16 +01001555 assert(rt < ARRAY_SIZE(ctx->writeout_branch));
1556
1557 midgard_instruction *br = ctx->writeout_branch[rt];
1558
1559 assert(!br);
1560
Alyssa Rosenzweig5e06d902019-08-30 11:06:33 -07001561 emit_explicit_constant(ctx, src, src);
Alyssa Rosenzweigdff49862019-08-12 12:36:46 -07001562
Alyssa Rosenzweigdff49862019-08-12 12:36:46 -07001563 struct midgard_instruction ins =
Alyssa Rosenzweig02f503e2019-12-30 18:53:04 -05001564 v_branch(false, false);
1565
Icecream9592d3f1f2020-06-06 15:08:06 +12001566 bool depth_only = (rt == MIDGARD_ZS_RT);
1567
Icecream95a6806342020-06-06 15:41:51 +12001568 ins.writeout = depth_only ? 0 : PAN_WRITEOUT_C;
Alyssa Rosenzweigdff49862019-08-12 12:36:46 -07001569
1570 /* Add dependencies */
Alyssa Rosenzweig76529832019-08-30 11:01:15 -07001571 ins.src[0] = src;
Alyssa Rosenzweig074815c2020-04-29 16:29:01 -04001572 ins.src_types[0] = nir_type_uint32;
Icecream9592d3f1f2020-06-06 15:08:06 +12001573 ins.constants.u32[0] = depth_only ? 0xFF : (rt - MIDGARD_COLOR_RT0) * 0x100;
Icecream952a5504f2020-06-06 14:42:18 +12001574 for (int i = 0; i < 4; ++i)
1575 ins.swizzle[0][i] = i;
Alyssa Rosenzweigdff49862019-08-12 12:36:46 -07001576
Icecream95a6806342020-06-06 15:41:51 +12001577 if (~src_z) {
1578 emit_explicit_constant(ctx, src_z, src_z);
1579 ins.src[2] = src_z;
1580 ins.src_types[2] = nir_type_uint32;
1581 ins.writeout |= PAN_WRITEOUT_Z;
1582 }
1583 if (~src_s) {
1584 emit_explicit_constant(ctx, src_s, src_s);
1585 ins.src[3] = src_s;
1586 ins.src_types[3] = nir_type_uint32;
1587 ins.writeout |= PAN_WRITEOUT_S;
1588 }
1589
Alyssa Rosenzweigdff49862019-08-12 12:36:46 -07001590 /* Emit the branch */
Boris Brezillone1ba0cd2020-01-31 10:05:16 +01001591 br = emit_mir_instruction(ctx, ins);
Alyssa Rosenzweig281cc6f2019-11-23 12:43:55 -05001592 schedule_barrier(ctx);
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05001593 ctx->writeout_branch[rt] = br;
1594
1595 /* Push our current location = current block count - 1 = where we'll
1596 * jump to. Maybe a bit too clever for my own good */
1597
1598 br->branch.target_block = ctx->block_count - 1;
Alyssa Rosenzweigdff49862019-08-12 12:36:46 -07001599}
1600
Alyssa Rosenzweig7229af72019-08-06 13:47:17 -07001601static void
1602emit_compute_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1603{
Alyssa Rosenzweig7c2647f2020-03-10 15:48:52 -04001604 unsigned reg = nir_dest_index(&instr->dest);
Alyssa Rosenzweig7229af72019-08-06 13:47:17 -07001605 midgard_instruction ins = m_ld_compute_id(reg, 0);
1606 ins.mask = mask_of(3);
Alyssa Rosenzweigd3747fb2020-02-12 08:39:29 -05001607 ins.swizzle[0][3] = COMPONENT_X; /* xyzx */
Alyssa Rosenzweig7229af72019-08-06 13:47:17 -07001608 ins.load_store.arg_1 = compute_builtin_arg(instr->intrinsic);
1609 emit_mir_instruction(ctx, ins);
1610}
Alyssa Rosenzweig306800d2019-12-19 13:31:21 -05001611
1612static unsigned
1613vertex_builtin_arg(nir_op op)
1614{
1615 switch (op) {
1616 case nir_intrinsic_load_vertex_id:
1617 return PAN_VERTEX_ID;
1618 case nir_intrinsic_load_instance_id:
1619 return PAN_INSTANCE_ID;
1620 default:
1621 unreachable("Invalid vertex builtin");
1622 }
1623}
1624
1625static void
1626emit_vertex_builtin(compiler_context *ctx, nir_intrinsic_instr *instr)
1627{
Alyssa Rosenzweig7c2647f2020-03-10 15:48:52 -04001628 unsigned reg = nir_dest_index(&instr->dest);
Alyssa Rosenzweig306800d2019-12-19 13:31:21 -05001629 emit_attr_read(ctx, reg, vertex_builtin_arg(instr->intrinsic), 1, nir_type_int);
1630}
1631
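/* Reads a special per-sample value through the old-style colour buffer load
 * path at a fixed address (the callers below use 96 for the sample mask and
 * 97 for the sample ID), broadcasting the X component */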
Alyssa Rosenzweig3f590982020-02-03 20:23:41 -05001632static void
Alyssa Rosenzweig80ebf112020-08-27 19:55:53 -04001633emit_special(compiler_context *ctx, nir_intrinsic_instr *instr, unsigned idx)
Alyssa Rosenzweigda2eed32020-07-15 09:56:24 -04001634{
1635 unsigned reg = nir_dest_index(&instr->dest);
1636
1637 midgard_instruction ld = m_ld_color_buffer_32u(reg, 0);
Italo Nicolabea6a652020-07-23 19:24:39 +00001638 ld.op = midgard_op_ld_color_buffer_32u_old;
Alyssa Rosenzweig80ebf112020-08-27 19:55:53 -04001639 ld.load_store.address = idx;
Alyssa Rosenzweigda2eed32020-07-15 09:56:24 -04001640 ld.load_store.arg_2 = 0x1E;
1641
1642 for (int i = 0; i < 4; ++i)
1643 ld.swizzle[0][i] = COMPONENT_X;
1644
1645 emit_mir_instruction(ctx, ld);
1646}
1647
1648static void
Alyssa Rosenzweig3f590982020-02-03 20:23:41 -05001649emit_control_barrier(compiler_context *ctx)
1650{
1651 midgard_instruction ins = {
1652 .type = TAG_TEXTURE_4,
Alyssa Rosenzweigfde1f2b2020-05-13 11:05:34 -04001653 .dest = ~0,
Alyssa Rosenzweig3f590982020-02-03 20:23:41 -05001654 .src = { ~0, ~0, ~0, ~0 },
Italo Nicola92c808c2020-07-29 19:10:25 +00001655 .op = TEXTURE_OP_BARRIER,
Alyssa Rosenzweig3f590982020-02-03 20:23:41 -05001656 };
1657
1658 emit_mir_instruction(ctx, ins);
1659}
1660
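/* Fetches the condition source for a branch, folding a NIR inot source
 * modifier into the branch's invert flag */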
Alyssa Rosenzweigdb7b0eb2020-04-30 14:17:06 -04001661static unsigned
1662mir_get_branch_cond(nir_src *src, bool *invert)
1663{
1664 /* Wrap it. No swizzle since it's a scalar */
1665
1666 nir_alu_src alu = {
1667 .src = *src
1668 };
1669
1670 *invert = pan_has_source_mod(&alu, nir_op_inot);
1671 return nir_src_index(NULL, &alu.src);
1672}
1673
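/* Maps a framebuffer load back to a render target address: blend shaders use
 * the bound RT, colour outputs map to their data location, and depth/stencil
 * use the special 0x1F/0x1E selectors */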
Icecream957781d2c2020-07-06 19:54:56 +12001674static uint8_t
Icecream95e7641922020-07-19 22:31:26 +12001675output_load_rt_addr(compiler_context *ctx, nir_intrinsic_instr *instr)
Icecream957781d2c2020-07-06 19:54:56 +12001676{
Icecream95e7641922020-07-19 22:31:26 +12001677 if (ctx->is_blend)
1678 return ctx->blend_rt;
1679
Icecream957781d2c2020-07-06 19:54:56 +12001680 const nir_variable *var;
Jason Ekstrand94f0bae2020-07-20 16:07:11 -05001681 var = search_var(ctx->nir, nir_var_shader_out, nir_intrinsic_base(instr));
Icecream957781d2c2020-07-06 19:54:56 +12001682 assert(var);
1683
1684 unsigned loc = var->data.location;
1685
1686 if (loc == FRAG_RESULT_COLOR)
1687 loc = FRAG_RESULT_DATA0;
1688
1689 if (loc >= FRAG_RESULT_DATA0)
1690 return loc - FRAG_RESULT_DATA0;
1691
1692 if (loc == FRAG_RESULT_DEPTH)
1693 return 0x1F;
1694 if (loc == FRAG_RESULT_STENCIL)
1695 return 0x1E;
1696
Icecream956493d292020-07-14 15:06:09 +12001697 unreachable("Invalid RT to load from");
Icecream957781d2c2020-07-06 19:54:56 +12001698}
1699
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00001700static void
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001701emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
1702{
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001703 unsigned offset = 0, reg;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001704
1705 switch (instr->intrinsic) {
1706 case nir_intrinsic_discard_if:
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001707 case nir_intrinsic_discard: {
Alyssa Rosenzweig779e1402019-02-17 23:24:39 +00001708 bool conditional = instr->intrinsic == nir_intrinsic_discard_if;
1709 struct midgard_instruction discard = v_branch(conditional, false);
1710 discard.branch.target_type = TARGET_DISCARD;
Alyssa Rosenzweigd6e4e362019-08-26 13:59:29 -07001711
Alyssa Rosenzweig074815c2020-04-29 16:29:01 -04001712 if (conditional) {
Alyssa Rosenzweigdb7b0eb2020-04-30 14:17:06 -04001713 discard.src[0] = mir_get_branch_cond(&instr->src[0],
1714 &discard.branch.invert_conditional);
Alyssa Rosenzweig074815c2020-04-29 16:29:01 -04001715 discard.src_types[0] = nir_type_uint32;
1716 }
Alyssa Rosenzweigd6e4e362019-08-26 13:59:29 -07001717
Alyssa Rosenzweig779e1402019-02-17 23:24:39 +00001718 emit_mir_instruction(ctx, discard);
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -07001719 schedule_barrier(ctx);
1720
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001721 break;
1722 }
1723
1724 case nir_intrinsic_load_uniform:
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001725 case nir_intrinsic_load_ubo:
Alyssa Rosenzweig31489372019-11-05 08:59:49 -05001726 case nir_intrinsic_load_global:
Alyssa Rosenzweig3a310fb2020-02-05 15:17:44 -05001727 case nir_intrinsic_load_shared:
Alyssa Rosenzweigc17a4412019-12-27 15:32:50 -05001728 case nir_intrinsic_load_input:
1729 case nir_intrinsic_load_interpolated_input: {
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001730 bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform;
1731 bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo;
Alyssa Rosenzweig31489372019-11-05 08:59:49 -05001732 bool is_global = instr->intrinsic == nir_intrinsic_load_global;
Alyssa Rosenzweig3a310fb2020-02-05 15:17:44 -05001733 bool is_shared = instr->intrinsic == nir_intrinsic_load_shared;
Alyssa Rosenzweigc17a4412019-12-27 15:32:50 -05001734 bool is_flat = instr->intrinsic == nir_intrinsic_load_input;
1735 bool is_interp = instr->intrinsic == nir_intrinsic_load_interpolated_input;
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001736
Alyssa Rosenzweigbbc050b2019-06-27 15:33:07 -07001737 /* Get the base type of the intrinsic */
Alyssa Rosenzweig8d747492019-06-27 14:13:10 -07001738 /* TODO: Infer type? Does it matter? */
1739 nir_alu_type t =
Alyssa Rosenzweig3a310fb2020-02-05 15:17:44 -05001740 (is_ubo || is_global || is_shared) ? nir_type_uint :
Alyssa Rosenzweigc17a4412019-12-27 15:32:50 -05001741 (is_interp) ? nir_type_float :
Jason Ekstrand0aa08ae2020-09-30 21:20:53 -05001742 nir_intrinsic_dest_type(instr);
Alyssa Rosenzweigc17a4412019-12-27 15:32:50 -05001743
Alyssa Rosenzweigbbc050b2019-06-27 15:33:07 -07001744 t = nir_alu_type_get_base_type(t);
1745
Alyssa Rosenzweig31489372019-11-05 08:59:49 -05001746 if (!(is_ubo || is_global)) {
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001747 offset = nir_intrinsic_base(instr);
1748 }
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001749
Alyssa Rosenzweigc1715b52019-05-22 02:44:12 +00001750 unsigned nr_comp = nir_intrinsic_dest_components(instr);
Alyssa Rosenzweig6a466c02019-04-20 23:52:42 +00001751
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001752 nir_src *src_offset = nir_get_io_offset_src(instr);
1753
1754 bool direct = nir_src_is_const(*src_offset);
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07001755 nir_src *indirect_offset = direct ? NULL : src_offset;
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001756
1757 if (direct)
1758 offset += nir_src_as_uint(*src_offset);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001759
Alyssa Rosenzweig43568f22019-06-06 08:16:04 -07001760                /* We may need to apply a component offset within the vec4 */
Alyssa Rosenzweigc17a4412019-12-27 15:32:50 -05001761 int component = (is_flat || is_interp) ?
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07001762 nir_intrinsic_component(instr) : 0;
Alyssa Rosenzweig7c2647f2020-03-10 15:48:52 -04001763 reg = nir_dest_index(&instr->dest);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001764
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001765 if (is_uniform && !ctx->is_blend) {
Alyssa Rosenzweigc2ff3bb2020-03-10 16:00:56 -04001766 emit_ubo_read(ctx, &instr->instr, reg, (ctx->sysvals.sysval_count + offset) * 16, indirect_offset, 4, 0);
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001767 } else if (is_ubo) {
1768 nir_src index = instr->src[0];
1769
Alyssa Rosenzweig59d30fd2020-01-10 17:47:57 -05001770 /* TODO: Is indirect block number possible? */
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001771 assert(nir_src_is_const(index));
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001772
Alyssa Rosenzweig5e2c3d42019-06-20 15:51:31 -07001773 uint32_t uindex = nir_src_as_uint(index) + 1;
Alyssa Rosenzweig59d30fd2020-01-10 17:47:57 -05001774 emit_ubo_read(ctx, &instr->instr, reg, offset, indirect_offset, 0, uindex);
Alyssa Rosenzweig3a310fb2020-02-05 15:17:44 -05001775 } else if (is_global || is_shared) {
Alyssa Rosenzweig0bb25e42020-02-27 09:41:17 -05001776 emit_global(ctx, &instr->instr, true, reg, src_offset, is_shared);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001777 } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
Alyssa Rosenzweig5f8dd412020-05-22 16:22:48 -04001778 emit_varying_read(ctx, reg, offset, nr_comp, component, indirect_offset, t | nir_dest_bit_size(instr->dest), is_flat);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001779 } else if (ctx->is_blend) {
Icecream9585954ec2020-06-25 22:21:50 +12001780 /* ctx->blend_input will be precoloured to r0/r2, where
Alyssa Rosenzweig277b6162020-06-12 16:45:24 -04001781 * the input is preloaded */
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001782
Icecream9585954ec2020-06-25 22:21:50 +12001783 unsigned *input = offset ? &ctx->blend_src1 : &ctx->blend_input;
1784
1785 if (*input == ~0)
1786 *input = reg;
Alyssa Rosenzweig277b6162020-06-12 16:45:24 -04001787 else
Icecream9585954ec2020-06-25 22:21:50 +12001788 emit_mir_instruction(ctx, v_mov(*input, reg));
Alyssa Rosenzweig6e688902019-12-19 13:24:17 -05001789 } else if (ctx->stage == MESA_SHADER_VERTEX) {
1790 emit_attr_read(ctx, reg, offset, nr_comp, t);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001791 } else {
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +01001792 DBG("Unknown load\n");
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001793 assert(0);
1794 }
1795
1796 break;
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07001797 }
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001798
Alyssa Rosenzweigc17a4412019-12-27 15:32:50 -05001799 /* Artefact of load_interpolated_input. TODO: other barycentric modes */
1800 case nir_intrinsic_load_barycentric_pixel:
Tomeu Vizoso25042062020-01-03 09:42:11 +01001801 case nir_intrinsic_load_barycentric_centroid:
Alyssa Rosenzweigc17a4412019-12-27 15:32:50 -05001802 break;
1803
Alyssa Rosenzweig1686ef82019-07-01 17:23:58 -07001804        /* Reads a 128-bit value raw off the tilebuffer during blending, tasty */
1805
Alyssa Rosenzweig36af05b2020-06-01 14:14:33 -04001806 case nir_intrinsic_load_raw_output_pan: {
Alyssa Rosenzweig7c2647f2020-03-10 15:48:52 -04001807 reg = nir_dest_index(&instr->dest);
Alyssa Rosenzweig1686ef82019-07-01 17:23:58 -07001808
Alyssa Rosenzweig843874c2019-11-06 21:50:32 -05001809 /* T720 and below use different blend opcodes with slightly
1810 * different semantics than T760 and up */
1811
Alyssa Rosenzweig2d1e18e2020-01-02 12:28:54 -05001812 midgard_instruction ld = m_ld_color_buffer_32u(reg, 0);
Alyssa Rosenzweig843874c2019-11-06 21:50:32 -05001813
Icecream95e7641922020-07-19 22:31:26 +12001814 ld.load_store.arg_2 = output_load_rt_addr(ctx, instr);
Icecream957781d2c2020-07-06 19:54:56 +12001815
Icecream95c20d1662020-07-16 14:16:11 +12001816 if (nir_src_is_const(instr->src[0])) {
1817 ld.load_store.arg_1 = nir_src_as_uint(instr->src[0]);
1818 } else {
1819 ld.load_store.varying_parameters = 2;
1820 ld.src[1] = nir_src_index(ctx, &instr->src[0]);
1821 ld.src_types[1] = nir_type_int32;
1822 }
1823
Alyssa Rosenzweig36af05b2020-06-01 14:14:33 -04001824 if (ctx->quirks & MIDGARD_OLD_BLEND) {
Italo Nicolabea6a652020-07-23 19:24:39 +00001825 ld.op = midgard_op_ld_color_buffer_32u_old;
Alyssa Rosenzweig5a175e42020-05-29 21:11:11 -04001826 ld.load_store.address = 16;
1827 ld.load_store.arg_2 = 0x1E;
Alyssa Rosenzweig843874c2019-11-06 21:50:32 -05001828 }
1829
Alyssa Rosenzweig1a4153b2019-08-30 17:29:17 -07001830 emit_mir_instruction(ctx, ld);
Alyssa Rosenzweig39104222019-05-06 02:12:41 +00001831 break;
Alyssa Rosenzweig36af05b2020-06-01 14:14:33 -04001832 }
1833
1834 case nir_intrinsic_load_output: {
1835 reg = nir_dest_index(&instr->dest);
Alyssa Rosenzweig36af05b2020-06-01 14:14:33 -04001836
Icecream952fbe7ca2020-07-09 23:44:41 +12001837 unsigned bits = nir_dest_bit_size(instr->dest);
1838
1839 midgard_instruction ld;
1840 if (bits == 16)
1841 ld = m_ld_color_buffer_as_fp16(reg, 0);
1842 else
1843 ld = m_ld_color_buffer_as_fp32(reg, 0);
Alyssa Rosenzweig36af05b2020-06-01 14:14:33 -04001844
Icecream95e7641922020-07-19 22:31:26 +12001845 ld.load_store.arg_2 = output_load_rt_addr(ctx, instr);
Icecream957781d2c2020-07-06 19:54:56 +12001846
Alyssa Rosenzweig36af05b2020-06-01 14:14:33 -04001847 for (unsigned c = 4; c < 16; ++c)
1848 ld.swizzle[0][c] = 0;
1849
1850 if (ctx->quirks & MIDGARD_OLD_BLEND) {
Icecream952fbe7ca2020-07-09 23:44:41 +12001851 if (bits == 16)
Italo Nicolabea6a652020-07-23 19:24:39 +00001852 ld.op = midgard_op_ld_color_buffer_as_fp16_old;
Icecream952fbe7ca2020-07-09 23:44:41 +12001853 else
Italo Nicolabea6a652020-07-23 19:24:39 +00001854 ld.op = midgard_op_ld_color_buffer_as_fp32_old;
Alyssa Rosenzweig36af05b2020-06-01 14:14:33 -04001855 ld.load_store.address = 1;
1856 ld.load_store.arg_2 = 0x1E;
1857 }
1858
1859 emit_mir_instruction(ctx, ld);
1860 break;
1861 }
Alyssa Rosenzweig39104222019-05-06 02:12:41 +00001862
1863 case nir_intrinsic_load_blend_const_color_rgba: {
1864 assert(ctx->is_blend);
Alyssa Rosenzweig7c2647f2020-03-10 15:48:52 -04001865 reg = nir_dest_index(&instr->dest);
Alyssa Rosenzweig39104222019-05-06 02:12:41 +00001866
1867 /* Blend constants are embedded directly in the shader and
1868 * patched in, so we use some magic routing */
1869
Alyssa Rosenzweigc3a46e72019-10-30 16:29:28 -04001870 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), reg);
Alyssa Rosenzweig39104222019-05-06 02:12:41 +00001871 ins.has_constants = true;
1872 ins.has_blend_constant = true;
1873 emit_mir_instruction(ctx, ins);
1874 break;
1875 }
1876
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001877 case nir_intrinsic_store_output:
Icecream95d37e9012020-06-06 17:25:08 +12001878 case nir_intrinsic_store_combined_output_pan:
Karol Herbst1aabb792019-03-29 21:40:45 +01001879 assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001880
Karol Herbst1aabb792019-03-29 21:40:45 +01001881 offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001882
Alyssa Rosenzweig4ed23b12019-02-07 04:56:13 +00001883 reg = nir_src_index(ctx, &instr->src[0]);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001884
1885 if (ctx->stage == MESA_SHADER_FRAGMENT) {
Icecream95d37e9012020-06-06 17:25:08 +12001886 bool combined = instr->intrinsic ==
1887 nir_intrinsic_store_combined_output_pan;
1888
Boris Brezillonc68cd392020-01-31 09:22:50 +01001889 const nir_variable *var;
Jason Ekstrand94f0bae2020-07-20 16:07:11 -05001890 var = search_var(ctx->nir, nir_var_shader_out,
Boris Brezillonc68cd392020-01-31 09:22:50 +01001891 nir_intrinsic_base(instr));
1892 assert(var);
Icecream9585954ec2020-06-25 22:21:50 +12001893
1894 /* Dual-source blend writeout is done by leaving the
1895 * value in r2 for the blend shader to use. */
1896 if (var->data.index) {
1897 if (instr->src[0].is_ssa) {
1898 emit_explicit_constant(ctx, reg, reg);
1899
1900 unsigned out = make_compiler_temp(ctx);
1901
1902 midgard_instruction ins = v_mov(reg, out);
1903 emit_mir_instruction(ctx, ins);
1904
1905 ctx->blend_src1 = out;
1906 } else {
1907 ctx->blend_src1 = reg;
1908 }
1909
1910 break;
1911 }
1912
1913 enum midgard_rt_id rt;
Boris Brezillonc68cd392020-01-31 09:22:50 +01001914 if (var->data.location == FRAG_RESULT_COLOR)
1915 rt = MIDGARD_COLOR_RT0;
1916 else if (var->data.location >= FRAG_RESULT_DATA0)
1917 rt = MIDGARD_COLOR_RT0 + var->data.location -
1918 FRAG_RESULT_DATA0;
Icecream95d37e9012020-06-06 17:25:08 +12001919 else if (combined)
1920 rt = MIDGARD_ZS_RT;
Boris Brezillonc68cd392020-01-31 09:22:50 +01001921 else
Eric Anholt4c24c822020-08-25 10:15:27 -07001922 unreachable("bad rt");
Boris Brezillonc68cd392020-01-31 09:22:50 +01001923
Icecream95d37e9012020-06-06 17:25:08 +12001924 unsigned reg_z = ~0, reg_s = ~0;
1925 if (combined) {
1926 unsigned writeout = nir_intrinsic_component(instr);
1927 if (writeout & PAN_WRITEOUT_Z)
1928 reg_z = nir_src_index(ctx, &instr->src[2]);
1929 if (writeout & PAN_WRITEOUT_S)
1930 reg_s = nir_src_index(ctx, &instr->src[3]);
1931 }
1932
1933 emit_fragment_store(ctx, reg, reg_z, reg_s, rt);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001934 } else if (ctx->stage == MESA_SHADER_VERTEX) {
Icecream95d37e9012020-06-06 17:25:08 +12001935 assert(instr->intrinsic == nir_intrinsic_store_output);
1936
Alyssa Rosenzweiga3ae3cb2019-06-17 12:35:57 -07001937 /* We should have been vectorized, though we don't
1938 * currently check that st_vary is emitted only once
1939 * per slot (this is relevant, since there's not a mask
1940 * parameter available on the store [set to 0 by the
1941 * blob]). We do respect the component by adjusting the
Alyssa Rosenzweig233c0fa2019-07-24 12:54:59 -07001942 * swizzle. If this is a constant source, we'll need to
1943 * emit that explicitly. */
1944
1945 emit_explicit_constant(ctx, reg, reg);
Alyssa Rosenzweiga3ae3cb2019-06-17 12:35:57 -07001946
Boris Brezillon6af63c92020-01-16 11:20:06 +01001947 unsigned dst_component = nir_intrinsic_component(instr);
Alyssa Rosenzweig27887212019-08-15 16:53:03 -07001948 unsigned nr_comp = nir_src_num_components(instr->src[0]);
Alyssa Rosenzweigde8d49a2019-06-06 09:15:26 -07001949
Alyssa Rosenzweig233c0fa2019-07-24 12:54:59 -07001950 midgard_instruction st = m_st_vary_32(reg, offset);
Alyssa Rosenzweigc9087722019-08-01 13:29:01 -07001951 st.load_store.arg_1 = 0x9E;
1952 st.load_store.arg_2 = 0x1E;
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04001953
Jason Ekstrand0aa08ae2020-09-30 21:20:53 -05001954 switch (nir_alu_type_get_base_type(nir_intrinsic_src_type(instr))) {
Alyssa Rosenzweig66c26962019-12-27 14:25:00 -05001955 case nir_type_uint:
1956 case nir_type_bool:
Italo Nicolabea6a652020-07-23 19:24:39 +00001957 st.op = midgard_op_st_vary_32u;
Alyssa Rosenzweig66c26962019-12-27 14:25:00 -05001958 break;
1959 case nir_type_int:
Italo Nicolabea6a652020-07-23 19:24:39 +00001960 st.op = midgard_op_st_vary_32i;
Alyssa Rosenzweig66c26962019-12-27 14:25:00 -05001961 break;
1962 case nir_type_float:
Italo Nicolabea6a652020-07-23 19:24:39 +00001963 st.op = midgard_op_st_vary_32;
Alyssa Rosenzweig66c26962019-12-27 14:25:00 -05001964 break;
1965 default:
1966 unreachable("Attempted to store unknown type");
1967 break;
1968 }
1969
Boris Brezillon6af63c92020-01-16 11:20:06 +01001970 /* nir_intrinsic_component(store_intr) encodes the
1971 * destination component start. Source component offset
1972 * adjustment is taken care of in
1973 * install_registers_instr(), when offset_swizzle() is
1974 * called.
1975 */
1976 unsigned src_component = COMPONENT_X;
1977
1978 assert(nr_comp > 0);
1979 for (unsigned i = 0; i < ARRAY_SIZE(st.swizzle); ++i) {
1980 st.swizzle[0][i] = src_component;
1981 if (i >= dst_component && i < dst_component + nr_comp - 1)
1982 src_component++;
1983 }
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04001984
Alyssa Rosenzweig4aced182019-06-06 08:21:27 -07001985 emit_mir_instruction(ctx, st);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001986 } else {
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +01001987 DBG("Unknown store\n");
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00001988 assert(0);
1989 }
1990
1991 break;
1992
Alyssa Rosenzweig541b3292019-07-01 15:02:40 -07001993 /* Special case of store_output for lowered blend shaders */
1994 case nir_intrinsic_store_raw_output_pan:
1995 assert (ctx->stage == MESA_SHADER_FRAGMENT);
1996 reg = nir_src_index(ctx, &instr->src[0]);
Icecream95a6806342020-06-06 15:41:51 +12001997 emit_fragment_store(ctx, reg, ~0, ~0, ctx->blend_rt);
Alyssa Rosenzweig541b3292019-07-01 15:02:40 -07001998 break;
1999
Alyssa Rosenzweig31489372019-11-05 08:59:49 -05002000 case nir_intrinsic_store_global:
Alyssa Rosenzweig3a310fb2020-02-05 15:17:44 -05002001 case nir_intrinsic_store_shared:
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07002002 reg = nir_src_index(ctx, &instr->src[0]);
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07002003 emit_explicit_constant(ctx, reg, reg);
Alyssa Rosenzweig3a310fb2020-02-05 15:17:44 -05002004
Alyssa Rosenzweig0bb25e42020-02-27 09:41:17 -05002005 emit_global(ctx, &instr->instr, false, reg, &instr->src[1], instr->intrinsic == nir_intrinsic_store_shared);
Alyssa Rosenzweig419ddd62019-08-01 10:03:02 -07002006 break;
2007
Alyssa Rosenzweig31489372019-11-05 08:59:49 -05002008 case nir_intrinsic_load_ssbo_address:
Alyssa Rosenzweigb756a662020-03-10 16:19:33 -04002009 emit_sysval_read(ctx, &instr->instr, 1, 0);
Alyssa Rosenzweigfcbb3d42020-02-04 09:46:17 -05002010 break;
2011
Jason Ekstrand97501642020-09-22 03:24:45 -05002012 case nir_intrinsic_get_ssbo_size:
Alyssa Rosenzweigb756a662020-03-10 16:19:33 -04002013 emit_sysval_read(ctx, &instr->instr, 1, 8);
Alyssa Rosenzweig31489372019-11-05 08:59:49 -05002014 break;
Dylan Baker8e369612018-09-14 12:57:32 -07002015
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00002016 case nir_intrinsic_load_viewport_scale:
2017 case nir_intrinsic_load_viewport_offset:
Alyssa Rosenzweig15954ab2019-08-06 14:07:10 -07002018 case nir_intrinsic_load_num_work_groups:
Alyssa Rosenzweig4e07e7b2019-11-21 08:42:28 -05002019 case nir_intrinsic_load_sampler_lod_parameters_pan:
Alyssa Rosenzweigb756a662020-03-10 16:19:33 -04002020 emit_sysval_read(ctx, &instr->instr, 3, 0);
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00002021 break;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002022
Alyssa Rosenzweig7229af72019-08-06 13:47:17 -07002023 case nir_intrinsic_load_work_group_id:
2024 case nir_intrinsic_load_local_invocation_id:
2025 emit_compute_builtin(ctx, instr);
2026 break;
2027
Alyssa Rosenzweig306800d2019-12-19 13:31:21 -05002028 case nir_intrinsic_load_vertex_id:
2029 case nir_intrinsic_load_instance_id:
2030 emit_vertex_builtin(ctx, instr);
2031 break;
2032
Alyssa Rosenzweig80ebf112020-08-27 19:55:53 -04002033 case nir_intrinsic_load_sample_mask_in:
2034 emit_special(ctx, instr, 96);
2035 break;
2036
Alyssa Rosenzweigda2eed32020-07-15 09:56:24 -04002037 case nir_intrinsic_load_sample_id:
Alyssa Rosenzweig80ebf112020-08-27 19:55:53 -04002038 emit_special(ctx, instr, 97);
Alyssa Rosenzweigda2eed32020-07-15 09:56:24 -04002039 break;
2040
Alyssa Rosenzweig3f590982020-02-03 20:23:41 -05002041 case nir_intrinsic_memory_barrier_buffer:
2042 case nir_intrinsic_memory_barrier_shared:
2043 break;
2044
2045 case nir_intrinsic_control_barrier:
2046 schedule_barrier(ctx);
2047 emit_control_barrier(ctx);
2048 schedule_barrier(ctx);
2049 break;
2050
Italo Nicolad7b6d2e2020-08-31 17:32:30 +00002051 ATOMIC_CASE(ctx, instr, add, add);
2052 ATOMIC_CASE(ctx, instr, and, and);
2053 ATOMIC_CASE(ctx, instr, comp_swap, cmpxchg);
2054 ATOMIC_CASE(ctx, instr, exchange, xchg);
2055 ATOMIC_CASE(ctx, instr, imax, imax);
2056 ATOMIC_CASE(ctx, instr, imin, imin);
2057 ATOMIC_CASE(ctx, instr, or, or);
2058 ATOMIC_CASE(ctx, instr, umax, umax);
2059 ATOMIC_CASE(ctx, instr, umin, umin);
2060 ATOMIC_CASE(ctx, instr, xor, xor);
2061
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002062 default:
Tomeu Vizosoae5e6402020-02-21 13:47:38 +01002063 fprintf(stderr, "Unhandled intrinsic %s\n", nir_intrinsic_infos[instr->intrinsic].name);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002064 assert(0);
2065 break;
2066 }
2067}
2068
Alyssa Rosenzweig1d0b3ef2020-08-05 18:11:15 -04002069/* Returns the sampler dimension, with 0 special-casing cubemaps */
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002070static unsigned
2071midgard_tex_format(enum glsl_sampler_dim dim)
2072{
2073 switch (dim) {
Alyssa Rosenzweig83c02a52019-06-17 14:26:08 -07002074 case GLSL_SAMPLER_DIM_1D:
2075 case GLSL_SAMPLER_DIM_BUF:
Alyssa Rosenzweig1d0b3ef2020-08-05 18:11:15 -04002076 return 1;
Alyssa Rosenzweig83c02a52019-06-17 14:26:08 -07002077
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002078 case GLSL_SAMPLER_DIM_2D:
Alyssa Rosenzweiga2748d42020-06-30 15:31:30 -04002079 case GLSL_SAMPLER_DIM_MS:
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002080 case GLSL_SAMPLER_DIM_EXTERNAL:
Alyssa Rosenzweig44a6c382019-08-14 08:44:40 -07002081 case GLSL_SAMPLER_DIM_RECT:
Alyssa Rosenzweig1d0b3ef2020-08-05 18:11:15 -04002082 return 2;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002083
2084 case GLSL_SAMPLER_DIM_3D:
Alyssa Rosenzweig1d0b3ef2020-08-05 18:11:15 -04002085 return 3;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002086
2087 case GLSL_SAMPLER_DIM_CUBE:
Alyssa Rosenzweig1d0b3ef2020-08-05 18:11:15 -04002088 return 0;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002089
2090 default:
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +01002091 DBG("Unknown sampler dim type\n");
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002092 assert(0);
2093 return 0;
2094 }
2095}
2096
Alyssa Rosenzweigc6c906e2020-05-21 18:02:38 -04002097/* Tries to attach an explicit LOD or bias as a constant. Returns whether this
Alyssa Rosenzweig213b6282019-06-18 09:02:20 -07002098 * was successful */
2099
2100static bool
2101pan_attach_constant_bias(
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07002102 compiler_context *ctx,
2103 nir_src lod,
2104 midgard_texture_word *word)
Alyssa Rosenzweig213b6282019-06-18 09:02:20 -07002105{
2106 /* To attach as constant, it has to *be* constant */
2107
2108 if (!nir_src_is_const(lod))
2109 return false;
2110
2111 float f = nir_src_as_float(lod);
2112
2113 /* Break into fixed-point */
2114 signed lod_int = f;
2115 float lod_frac = f - lod_int;
2116
2117 /* Carry over negative fractions */
2118 if (lod_frac < 0.0) {
2119 lod_int--;
2120 lod_frac += 1.0;
2121 }
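        /* e.g. a bias of -0.5 truncates to lod_int = 0 with lod_frac = -0.5,
         * so we borrow: lod_int becomes -1 and lod_frac becomes 0.5,
         * preserving the sum */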
2122
2123 /* Encode */
2124 word->bias = float_to_ubyte(lod_frac);
2125 word->bias_int = lod_int;
2126
2127 return true;
2128}
2129
Alyssa Rosenzweigf6e19dd2020-08-28 08:35:19 -04002130static enum mali_texture_mode
2131mdg_texture_mode(nir_tex_instr *instr)
2132{
Alyssa Rosenzweig7dab5742020-08-28 09:48:38 -04002133 if (instr->op == nir_texop_tg4 && instr->is_shadow)
2134 return TEXTURE_GATHER_SHADOW;
2135 else if (instr->op == nir_texop_tg4)
2136 return TEXTURE_GATHER_X + instr->component;
2137 else if (instr->is_shadow)
Alyssa Rosenzweigf6e19dd2020-08-28 08:35:19 -04002138 return TEXTURE_SHADOW;
2139 else
2140 return TEXTURE_NORMAL;
2141}
2142
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002143static void
Boris Brezillon5c17f842019-06-17 21:47:46 +02002144emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07002145 unsigned midgard_texop)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002146{
2147 /* TODO */
2148 //assert (!instr->sampler);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002149
Italo Nicola83592de2020-07-15 18:48:42 +00002150 nir_dest *dest = &instr->dest;
2151
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002152 int texture_index = instr->texture_index;
2153 int sampler_index = texture_index;
2154
Alyssa Rosenzweig4fb02172020-04-27 19:11:19 -04002155 nir_alu_type dest_base = nir_alu_type_get_base_type(instr->dest_type);
Italo Nicola83592de2020-07-15 18:48:42 +00002156 nir_alu_type dest_type = dest_base | nir_dest_bit_size(*dest);
2157
2158 /* texture instructions support float outmods */
2159 unsigned outmod = midgard_outmod_none;
2160 if (dest_base == nir_type_float) {
2161 outmod = mir_determine_float_outmod(ctx, &dest, 0);
2162 }
Alyssa Rosenzweig4fb02172020-04-27 19:11:19 -04002163
Alyssa Rosenzweigb0e89412019-06-18 09:02:35 -07002164 midgard_instruction ins = {
2165 .type = TAG_TEXTURE_4,
Alyssa Rosenzweigf8b18a42019-07-01 18:51:48 -07002166 .mask = 0xF,
Italo Nicola83592de2020-07-15 18:48:42 +00002167 .dest = nir_dest_index(dest),
Alyssa Rosenzweigccbc9a42019-12-19 10:35:18 -05002168 .src = { ~0, ~0, ~0, ~0 },
Alyssa Rosenzweig4fb02172020-04-27 19:11:19 -04002169 .dest_type = dest_type,
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04002170 .swizzle = SWIZZLE_IDENTITY_4,
Italo Nicola83592de2020-07-15 18:48:42 +00002171 .outmod = outmod,
Italo Nicola92c808c2020-07-29 19:10:25 +00002172 .op = midgard_texop,
Alyssa Rosenzweigb0e89412019-06-18 09:02:35 -07002173 .texture = {
Alyssa Rosenzweigb0e89412019-06-18 09:02:35 -07002174 .format = midgard_tex_format(instr->sampler_dim),
2175 .texture_handle = texture_index,
2176 .sampler_handle = sampler_index,
Alyssa Rosenzweigf6e19dd2020-08-28 08:35:19 -04002177 .mode = mdg_texture_mode(instr)
Alyssa Rosenzweigb0e89412019-06-18 09:02:35 -07002178 }
2179 };
Alyssa Rosenzweig8429bee2019-06-14 16:03:39 -07002180
Alyssa Rosenzweig7dab5742020-08-28 09:48:38 -04002181 if (instr->is_shadow && !instr->is_new_style_shadow && instr->op != nir_texop_tg4)
Icecream95d1290e72020-05-12 10:16:31 +12002182 for (int i = 0; i < 4; ++i)
2183 ins.swizzle[0][i] = COMPONENT_X;
2184
Alyssa Rosenzweigd183f842019-12-16 17:02:36 -05002185 /* We may need a temporary for the coordinate */
2186
Alyssa Rosenzweig66013cb2019-12-16 17:14:04 -05002187 bool needs_temp_coord =
2188 (midgard_texop == TEXTURE_OP_TEXEL_FETCH) ||
Alyssa Rosenzweig6b7243f2019-12-20 17:25:05 -05002189 (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) ||
Alyssa Rosenzweig66013cb2019-12-16 17:14:04 -05002190 (instr->is_shadow);
2191
Alyssa Rosenzweigd183f842019-12-16 17:02:36 -05002192 unsigned coords = needs_temp_coord ? make_compiler_temp_reg(ctx) : 0;
2193
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002194 for (unsigned i = 0; i < instr->num_srcs; ++i) {
Alyssa Rosenzweiga19ca342019-06-11 09:23:05 -07002195 int index = nir_src_index(ctx, &instr->src[i].src);
Alyssa Rosenzweigedc8e412019-08-15 16:41:53 -07002196 unsigned nr_components = nir_src_num_components(instr->src[i].src);
Alyssa Rosenzweig4fb02172020-04-27 19:11:19 -04002197 unsigned sz = nir_src_bit_size(instr->src[i].src);
2198 nir_alu_type T = nir_tex_instr_src_type(instr, i) | sz;
Alyssa Rosenzweiga19ca342019-06-11 09:23:05 -07002199
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002200 switch (instr->src[i].src_type) {
2201 case nir_tex_src_coord: {
Alyssa Rosenzweigb6946d32019-07-25 08:44:53 -07002202 emit_explicit_constant(ctx, index, index);
2203
Alyssa Rosenzweig9e5a1412019-12-20 17:01:29 -05002204 unsigned coord_mask = mask_of(instr->coord_components);
2205
Alyssa Rosenzweigbc4c8532020-01-06 21:31:46 -05002206 bool flip_zw = (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) && (coord_mask & (1 << COMPONENT_Z));
2207
2208 if (flip_zw)
2209 coord_mask ^= ((1 << COMPONENT_Z) | (1 << COMPONENT_W));
2210
Alyssa Rosenzweig6b7243f2019-12-20 17:25:05 -05002211 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
2212 /* texelFetch is undefined on samplerCube */
2213 assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
2214
2215 /* For cubemaps, we use a special ld/st op to
2216 * select the face and copy the xy into the
2217 * texture register */
2218
2219 midgard_instruction ld = m_ld_cubemap_coords(coords, 0);
2220 ld.src[1] = index;
Alyssa Rosenzweig4fb02172020-04-27 19:11:19 -04002221 ld.src_types[1] = T;
Alyssa Rosenzweig6b7243f2019-12-20 17:25:05 -05002222 ld.mask = 0x3; /* xy */
2223 ld.load_store.arg_1 = 0x20;
2224 ld.swizzle[1][3] = COMPONENT_X;
2225 emit_mir_instruction(ctx, ld);
2226
2227 /* xyzw -> xyxx */
2228 ins.swizzle[1][2] = instr->is_shadow ? COMPONENT_Z : COMPONENT_X;
2229 ins.swizzle[1][3] = COMPONENT_X;
2230 } else if (needs_temp_coord) {
Alyssa Rosenzweigd183f842019-12-16 17:02:36 -05002231 /* mov coord_temp, coords */
2232 midgard_instruction mov = v_mov(index, coords);
Alyssa Rosenzweig9e5a1412019-12-20 17:01:29 -05002233 mov.mask = coord_mask;
Alyssa Rosenzweigbc4c8532020-01-06 21:31:46 -05002234
2235 if (flip_zw)
2236 mov.swizzle[1][COMPONENT_W] = COMPONENT_Z;
2237
Alyssa Rosenzweigd183f842019-12-16 17:02:36 -05002238 emit_mir_instruction(ctx, mov);
2239 } else {
2240 coords = index;
2241 }
2242
Alyssa Rosenzweig6b7243f2019-12-20 17:25:05 -05002243 ins.src[1] = coords;
Alyssa Rosenzweig4fb02172020-04-27 19:11:19 -04002244 ins.src_types[1] = T;
Alyssa Rosenzweig6b7243f2019-12-20 17:25:05 -05002245
Alyssa Rosenzweigb6946d32019-07-25 08:44:53 -07002246 /* Texelfetch coordinates uses all four elements
2247 * (xyz/index) regardless of texture dimensionality,
2248 * which means it's necessary to zero the unused
2249 * components to keep everything happy */
2250
2251 if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
Alyssa Rosenzweig9e5a1412019-12-20 17:01:29 -05002252 /* mov index.zw, #0, or generalized */
Alyssa Rosenzweigd183f842019-12-16 17:02:36 -05002253 midgard_instruction mov =
2254 v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), coords);
Alyssa Rosenzweigb6946d32019-07-25 08:44:53 -07002255 mov.has_constants = true;
Alyssa Rosenzweig9e5a1412019-12-20 17:01:29 -05002256 mov.mask = coord_mask ^ 0xF;
Alyssa Rosenzweigb6946d32019-07-25 08:44:53 -07002257 emit_mir_instruction(ctx, mov);
2258 }
2259
Alyssa Rosenzweigb6946d32019-07-25 08:44:53 -07002260 if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) {
Alyssa Rosenzweig4cd3dc92020-01-06 21:36:20 -05002261                                /* The hardware takes the array component in w, but
2262                                 * NIR puts it in z; if we have a temp coord we
2263                                 * already fixed that up */
2264
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04002265 if (nr_components == 3) {
2266 ins.swizzle[1][2] = COMPONENT_Z;
Alyssa Rosenzweig4cd3dc92020-01-06 21:36:20 -05002267 ins.swizzle[1][3] = needs_temp_coord ? COMPONENT_W : COMPONENT_Z;
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04002268 } else if (nr_components == 2) {
Alyssa Rosenzweig66013cb2019-12-16 17:14:04 -05002269 ins.swizzle[1][2] =
2270 instr->is_shadow ? COMPONENT_Z : COMPONENT_X;
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04002271 ins.swizzle[1][3] = COMPONENT_X;
2272 } else
Alyssa Rosenzweigedc8e412019-08-15 16:41:53 -07002273 unreachable("Invalid texture 2D components");
Alyssa Rosenzweig70b3e5d2019-03-28 04:27:13 +00002274 }
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002275
Alyssa Rosenzweig64b2fe92019-12-20 12:38:24 -05002276 if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
2277 /* We zeroed */
2278 ins.swizzle[1][2] = COMPONENT_Z;
2279 ins.swizzle[1][3] = COMPONENT_W;
2280 }
2281
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002282 break;
2283 }
2284
Alyssa Rosenzweig4012e062019-06-11 09:43:08 -07002285 case nir_tex_src_bias:
2286 case nir_tex_src_lod: {
Alyssa Rosenzweigb0e89412019-06-18 09:02:35 -07002287 /* Try as a constant if we can */
2288
2289 bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH;
2290 if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
2291 break;
2292
Alyssa Rosenzweigb0e89412019-06-18 09:02:35 -07002293 ins.texture.lod_register = true;
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04002294 ins.src[2] = index;
Alyssa Rosenzweig4fb02172020-04-27 19:11:19 -04002295 ins.src_types[2] = T;
Alyssa Rosenzweig72e57492019-12-20 12:34:20 -05002296
2297 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
2298 ins.swizzle[2][c] = COMPONENT_X;
2299
Alyssa Rosenzweigb6946d32019-07-25 08:44:53 -07002300 emit_explicit_constant(ctx, index, index);
Alyssa Rosenzweigb0e89412019-06-18 09:02:35 -07002301
Alyssa Rosenzweiga19ca342019-06-11 09:23:05 -07002302 break;
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07002303 };
Alyssa Rosenzweiga19ca342019-06-11 09:23:05 -07002304
Alyssa Rosenzweigccbc9a42019-12-19 10:35:18 -05002305 case nir_tex_src_offset: {
2306 ins.texture.offset_register = true;
2307 ins.src[3] = index;
Alyssa Rosenzweig4fb02172020-04-27 19:11:19 -04002308 ins.src_types[3] = T;
Alyssa Rosenzweigccbc9a42019-12-19 10:35:18 -05002309
2310 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
2311 ins.swizzle[3][c] = (c > COMPONENT_Z) ? 0 : c;
2312
2313 emit_explicit_constant(ctx, index, index);
Alyssa Rosenzweig4ec1f952019-12-20 12:58:10 -05002314 break;
Alyssa Rosenzweigccbc9a42019-12-19 10:35:18 -05002315 };
2316
Alyssa Rosenzweig6d9f9512020-06-30 15:31:39 -04002317 case nir_tex_src_comparator:
2318 case nir_tex_src_ms_index: {
Alyssa Rosenzweig66013cb2019-12-16 17:14:04 -05002319 unsigned comp = COMPONENT_Z;
2320
2321 /* mov coord_temp.foo, coords */
2322 midgard_instruction mov = v_mov(index, coords);
2323 mov.mask = 1 << comp;
2324
2325 for (unsigned i = 0; i < MIR_VEC_COMPONENTS; ++i)
2326 mov.swizzle[1][i] = COMPONENT_X;
2327
2328 emit_mir_instruction(ctx, mov);
2329 break;
2330 }
2331
Tomeu Vizoso226c1ef2019-12-19 15:07:39 +01002332 default: {
Tomeu Vizosoae5e6402020-02-21 13:47:38 +01002333 fprintf(stderr, "Unknown texture source type: %d\n", instr->src[i].src_type);
Tomeu Vizoso226c1ef2019-12-19 15:07:39 +01002334 assert(0);
2335 }
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002336 }
2337 }
2338
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002339 emit_mir_instruction(ctx, ins);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002340}
2341
2342static void
Boris Brezillon5c17f842019-06-17 21:47:46 +02002343emit_tex(compiler_context *ctx, nir_tex_instr *instr)
2344{
2345 switch (instr->op) {
2346 case nir_texop_tex:
2347 case nir_texop_txb:
2348 emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL);
2349 break;
2350 case nir_texop_txl:
Alyssa Rosenzweig7dab5742020-08-28 09:48:38 -04002351 case nir_texop_tg4:
Boris Brezillon5c17f842019-06-17 21:47:46 +02002352 emit_texop_native(ctx, instr, TEXTURE_OP_LOD);
2353 break;
Alyssa Rosenzweigf4bb7f02019-06-21 16:17:34 -07002354 case nir_texop_txf:
Alyssa Rosenzweig63a87222020-06-30 15:32:01 -04002355 case nir_texop_txf_ms:
Alyssa Rosenzweigf4bb7f02019-06-21 16:17:34 -07002356 emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH);
2357 break;
Boris Brezillonc3558862019-06-17 22:13:04 +02002358 case nir_texop_txs:
Alyssa Rosenzweigb756a662020-03-10 16:19:33 -04002359 emit_sysval_read(ctx, &instr->instr, 4, 0);
Boris Brezillonc3558862019-06-17 22:13:04 +02002360 break;
Tomeu Vizoso226c1ef2019-12-19 15:07:39 +01002361 default: {
Tomeu Vizosoae5e6402020-02-21 13:47:38 +01002362 fprintf(stderr, "Unhandled texture op: %d\n", instr->op);
Tomeu Vizoso226c1ef2019-12-19 15:07:39 +01002363 assert(0);
2364 }
Boris Brezillon5c17f842019-06-17 21:47:46 +02002365 }
2366}
2367
2368static void
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002369emit_jump(compiler_context *ctx, nir_jump_instr *instr)
2370{
2371 switch (instr->type) {
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07002372 case nir_jump_break: {
2373 /* Emit a branch out of the loop */
2374 struct midgard_instruction br = v_branch(false, false);
2375 br.branch.target_type = TARGET_BREAK;
2376 br.branch.target_break = ctx->current_loop_depth;
2377 emit_mir_instruction(ctx, br);
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07002378 break;
2379 }
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002380
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07002381 default:
2382 DBG("Unknown jump type %d\n", instr->type);
2383 break;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002384 }
2385}
2386
2387static void
2388emit_instr(compiler_context *ctx, struct nir_instr *instr)
2389{
2390 switch (instr->type) {
2391 case nir_instr_type_load_const:
2392 emit_load_const(ctx, nir_instr_as_load_const(instr));
2393 break;
2394
2395 case nir_instr_type_intrinsic:
2396 emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
2397 break;
2398
2399 case nir_instr_type_alu:
2400 emit_alu(ctx, nir_instr_as_alu(instr));
2401 break;
2402
2403 case nir_instr_type_tex:
2404 emit_tex(ctx, nir_instr_as_tex(instr));
2405 break;
2406
2407 case nir_instr_type_jump:
2408 emit_jump(ctx, nir_instr_as_jump(instr));
2409 break;
2410
2411 case nir_instr_type_ssa_undef:
2412 /* Spurious */
2413 break;
2414
2415 default:
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +01002416 DBG("Unhandled instruction type\n");
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002417 break;
2418 }
2419}
2420
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002421
2422/* ALU instructions can inline or embed constants, which decreases register
2423 * pressure and saves space. */
2424
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07002425#define CONDITIONAL_ATTACH(idx) { \
2426 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[idx] + 1); \
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002427\
2428 if (entry) { \
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07002429 attach_constants(ctx, alu, entry, alu->src[idx] + 1); \
2430 alu->src[idx] = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002431 } \
2432}
2433
2434static void
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -07002435inline_alu_constants(compiler_context *ctx, midgard_block *block)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002436{
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -07002437 mir_foreach_instr_in_block(block, alu) {
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002438 /* Other instructions cannot inline constants */
2439 if (alu->type != TAG_ALU_4) continue;
Alyssa Rosenzweig5e06d902019-08-30 11:06:33 -07002440 if (alu->compact_branch) continue;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002441
2442 /* If there is already a constant here, we can do nothing */
2443 if (alu->has_constants) continue;
2444
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07002445 CONDITIONAL_ATTACH(0);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002446
2447 if (!alu->has_constants) {
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07002448 CONDITIONAL_ATTACH(1)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002449 } else if (!alu->inline_constant) {
2450 /* Corner case: _two_ vec4 constants, for instance with a
2451 * csel. For this case, we can only use a constant
2452                         * register for one; we'll have to emit a move for the
Alyssa Rosenzweig3b10bcd2020-04-27 17:47:13 -04002453 * other. */
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002454
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07002455 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[1] + 1);
Alyssa Rosenzweig3b10bcd2020-04-27 17:47:13 -04002456 unsigned scratch = make_compiler_temp(ctx);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002457
2458 if (entry) {
Alyssa Rosenzweigc3a46e72019-10-30 16:29:28 -04002459 midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), scratch);
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07002460 attach_constants(ctx, &ins, entry, alu->src[1] + 1);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002461
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002462 /* Set the source */
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07002463 alu->src[1] = scratch;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002464
2465 /* Inject us -before- the last instruction which set r31 */
Boris Brezillon938c5b02019-08-28 09:17:21 +02002466 mir_insert_instruction_before(ctx, mir_prev_op(alu), ins);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002467 }
2468 }
2469 }
2470}
2471
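/* Compute the widest type size (in bits) an ALU instruction touches across
 * its sources and destination; this is the size the operation effectively
 * executes at, after accounting for the LUT and *_high adjustments below. */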
Italo Nicola5f7e0182020-07-10 09:36:58 +00002472unsigned
2473max_bitsize_for_alu(midgard_instruction *ins)
2474{
2475 unsigned max_bitsize = 0;
2476 for (int i = 0; i < MIR_SRC_COUNT; i++) {
2477 if (ins->src[i] == ~0) continue;
2478 unsigned src_bitsize = nir_alu_type_get_type_size(ins->src_types[i]);
2479 max_bitsize = MAX2(src_bitsize, max_bitsize);
2480 }
2481 unsigned dst_bitsize = nir_alu_type_get_type_size(ins->dest_type);
2482 max_bitsize = MAX2(dst_bitsize, max_bitsize);
2483
2484 /* We don't have fp16 LUTs, so we'll want to emit code like:
2485 *
2486 * vlut.fsinr hr0, hr0
2487 *
2488 * where both input and output are 16-bit but the operation is carried
2489 * out in 32-bit
2490 */
2491
2492 switch (ins->op) {
2493 case midgard_alu_op_fsqrt:
2494 case midgard_alu_op_frcp:
2495 case midgard_alu_op_frsqrt:
2496 case midgard_alu_op_fsin:
2497 case midgard_alu_op_fcos:
2498 case midgard_alu_op_fexp2:
2499 case midgard_alu_op_flog2:
2500 max_bitsize = MAX2(max_bitsize, 32);
2501 break;
2502
2503 default:
2504 break;
2505 }
2506
Alyssa Rosenzweig3e2cb212020-08-27 14:35:23 -04002507        /* High implies computing at a higher bitsize, e.g. umul_high of 32-bit
2508 * requires computing at 64-bit */
2509 if (midgard_is_integer_out_op(ins->op) && ins->outmod == midgard_outmod_int_high) {
2510 max_bitsize *= 2;
2511 assert(max_bitsize <= 64);
2512 }
2513
Italo Nicola5f7e0182020-07-10 09:36:58 +00002514 return max_bitsize;
2515}
2516
2517midgard_reg_mode
2518reg_mode_for_bitsize(unsigned bitsize)
2519{
2520 switch (bitsize) {
2521        /* Use the 16-bit pipe for 8-bit since we don't support vec16 yet */
2522 case 8:
2523 case 16:
2524 return midgard_reg_mode_16;
2525 case 32:
2526 return midgard_reg_mode_32;
2527 case 64:
2528 return midgard_reg_mode_64;
2529 default:
2530 unreachable("invalid bit size");
2531 }
2532}
2533
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002534/* Midgard supports two types of constants, embedded constants (128-bit) and
2535 * inline constants (16-bit). Sometimes, especially with scalar ops, embedded
2536 * constants can be demoted to inline constants, for space savings and
2537 * sometimes a performance boost */
2538
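/* Illustrative example (schematic, not real assembly): a scalar fadd whose
 * embedded constant is exactly representable in fp16, say 2.0 (0x4000), can
 * drop the 128-bit embedded constant and carry the value as a 16-bit inline
 * constant instead. */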
2539static void
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -07002540embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002541{
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -07002542 mir_foreach_instr_in_block(block, ins) {
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002543 if (!ins->has_constants) continue;
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07002544 if (ins->has_inline_constant) continue;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002545
2546 /* Blend constants must not be inlined by definition */
2547 if (ins->has_blend_constant) continue;
2548
Italo Nicola5f7e0182020-07-10 09:36:58 +00002549 unsigned max_bitsize = max_bitsize_for_alu(ins);
2550
Alyssa Rosenzweige92caad2019-07-01 20:02:57 -07002551 /* We can inline 32-bit (sometimes) or 16-bit (usually) */
Italo Nicola5f7e0182020-07-10 09:36:58 +00002552 bool is_16 = max_bitsize == 16;
2553 bool is_32 = max_bitsize == 32;
Alyssa Rosenzweige92caad2019-07-01 20:02:57 -07002554
2555 if (!(is_16 || is_32))
2556 continue;
2557
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002558                /* The first source (src1) cannot take an inline constant due to
2559                 * encoding restrictions; only the second source can. So, if the op
2560                 * commutes, we flip the arguments to put the constant second */
2561
Italo Nicolaf4c89bf2020-07-09 12:02:57 +00002562 int op = ins->op;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002563
Alyssa Rosenzweigba9f3d12020-04-30 13:11:52 -04002564 if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
2565 alu_opcode_props[op].props & OP_COMMUTES) {
2566 mir_flip(ins);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002567 }
2568
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07002569 if (ins->src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04002570                        /* The component comes from the swizzle; pick the first component actually written (per the mask) */
2571 assert(ins->mask);
2572 unsigned first_comp = ffs(ins->mask) - 1;
2573 unsigned component = ins->swizzle[1][first_comp];
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002574
2575 /* Scale constant appropriately, if we can legally */
Icecream95d97aaad2020-06-05 20:17:27 +12002576 int16_t scaled_constant = 0;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002577
Boris Brezillon15c92d12020-01-20 15:00:57 +01002578 if (is_16) {
2579 scaled_constant = ins->constants.u16[component];
2580 } else if (midgard_is_integer_op(op)) {
2581 scaled_constant = ins->constants.u32[component];
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002582
2583 /* Constant overflow after resize */
Boris Brezillon15c92d12020-01-20 15:00:57 +01002584 if (scaled_constant != ins->constants.u32[component])
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002585 continue;
2586 } else {
Boris Brezillon15c92d12020-01-20 15:00:57 +01002587 float original = ins->constants.f32[component];
Alyssa Rosenzweig39786142019-04-28 15:46:47 +00002588 scaled_constant = _mesa_float_to_half(original);
2589
2590 /* Check for loss of precision. If this is
2591 * mediump, we don't care, but for a highp
2592 * shader, we need to pay attention. NIR
2593 * doesn't yet tell us which mode we're in!
2594 * Practically this prevents most constants
2595 * from being inlined, sadly. */
2596
2597 float fp32 = _mesa_half_to_float(scaled_constant);
2598
2599 if (fp32 != original)
2600 continue;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002601 }
2602
Alyssa Rosenzweig1cd65352020-05-21 12:38:27 -04002603 /* Should've been const folded */
2604 if (ins->src_abs[1] || ins->src_neg[1])
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002605 continue;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002606
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04002607 /* Make sure that the constant is not itself a vector
2608 * by checking if all accessed values are the same. */
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002609
Boris Brezillon15c92d12020-01-20 15:00:57 +01002610 const midgard_constants *cons = &ins->constants;
2611 uint32_t value = is_16 ? cons->u16[component] : cons->u32[component];
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002612
2613 bool is_vector = false;
Italo Nicolaf4c89bf2020-07-09 12:02:57 +00002614 unsigned mask = effective_writemask(ins->op, ins->mask);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002615
Alyssa Rosenzweig70072a22019-10-26 14:06:17 -04002616 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002617 /* We only care if this component is actually used */
2618 if (!(mask & (1 << c)))
2619 continue;
2620
Boris Brezillon15c92d12020-01-20 15:00:57 +01002621 uint32_t test = is_16 ?
2622 cons->u16[ins->swizzle[1][c]] :
2623 cons->u32[ins->swizzle[1][c]];
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002624
2625 if (test != value) {
2626 is_vector = true;
2627 break;
2628 }
2629 }
2630
2631 if (is_vector)
2632 continue;
2633
2634 /* Get rid of the embedded constant */
2635 ins->has_constants = false;
Alyssa Rosenzweig75b6be22019-08-26 11:58:27 -07002636 ins->src[1] = ~0;
2637 ins->has_inline_constant = true;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002638 ins->inline_constant = scaled_constant;
2639 }
2640 }
2641}
2642
Alyssa Rosenzweigae20bee2019-06-06 11:19:13 -07002643/* Dead code elimination for branches at the end of a block - only one branch
2644 * per block is legal semantically */
2645
2646static void
Alyssa Rosenzweig1c2d4692020-04-30 13:13:24 -04002647midgard_cull_dead_branch(compiler_context *ctx, midgard_block *block)
Alyssa Rosenzweigae20bee2019-06-06 11:19:13 -07002648{
2649 bool branched = false;
2650
2651 mir_foreach_instr_in_block_safe(block, ins) {
2652 if (!midgard_is_branch_unit(ins->unit)) continue;
2653
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -07002654 if (branched)
Alyssa Rosenzweigae20bee2019-06-06 11:19:13 -07002655 mir_remove_instruction(ins);
Alyssa Rosenzweigae20bee2019-06-06 11:19:13 -07002656
2657 branched = true;
2658 }
2659}
2660
Alyssa Rosenzweig622e3a82020-06-02 12:15:18 -04002661/* We want to force the invert on AND/OR to the second slot to legalize into
2662 * iandnot/iornot. The relevant patterns are for AND (and OR respectively)
2663 *
2664 * ~a & #b = ~a & ~(#~b)
2665 * ~a & b = b & ~a
2666 */
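/* Schematic example: (~a) & 0x000F with only the first source inverted is
 * rewritten by flipping the inline constant to 0xFFF0 and setting the second
 * invert as well, giving (~a) & ~(0xFFF0), which computes the same value but
 * places the inverts where the later packing expects them. */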
2667
2668static void
2669midgard_legalize_invert(compiler_context *ctx, midgard_block *block)
2670{
2671 mir_foreach_instr_in_block(block, ins) {
2672 if (ins->type != TAG_ALU_4) continue;
2673
Italo Nicolaf4c89bf2020-07-09 12:02:57 +00002674 if (ins->op != midgard_alu_op_iand &&
2675 ins->op != midgard_alu_op_ior) continue;
Alyssa Rosenzweig622e3a82020-06-02 12:15:18 -04002676
2677 if (ins->src_invert[1] || !ins->src_invert[0]) continue;
2678
2679 if (ins->has_inline_constant) {
2680 /* ~(#~a) = ~(~#a) = a, so valid, and forces both
2681 * inverts on */
2682 ins->inline_constant = ~ins->inline_constant;
2683 ins->src_invert[1] = true;
2684 } else {
2685 /* Flip to the right invert order. Note
2686 * has_inline_constant false by assumption on the
2687 * branch, so flipping makes sense. */
2688 mir_flip(ins);
2689 }
2690 }
2691}
2692
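/* Emit the self-looping writeout epilogue for a render target: a single
 * branch that carries the writeout and targets its own block, mirroring the
 * original writeout branch's constants and source types. Returns the block
 * index so the caller can retarget the writeout branch at it. */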
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05002693static unsigned
Alyssa Rosenzweig60396342019-11-23 16:08:02 -05002694emit_fragment_epilogue(compiler_context *ctx, unsigned rt)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002695{
Alyssa Rosenzweig02f503e2019-12-30 18:53:04 -05002696 /* Loop to ourselves */
Boris Brezillone1ba0cd2020-01-31 10:05:16 +01002697 midgard_instruction *br = ctx->writeout_branch[rt];
Alyssa Rosenzweig02f503e2019-12-30 18:53:04 -05002698 struct midgard_instruction ins = v_branch(false, false);
Icecream9592d3f1f2020-06-06 15:08:06 +12002699 ins.writeout = br->writeout;
Alyssa Rosenzweig02f503e2019-12-30 18:53:04 -05002700 ins.branch.target_block = ctx->block_count - 1;
Boris Brezillone1ba0cd2020-01-31 10:05:16 +01002701 ins.constants.u32[0] = br->constants.u32[0];
Icecream952a5504f2020-06-06 14:42:18 +12002702 memcpy(&ins.src_types, &br->src_types, sizeof(ins.src_types));
Alyssa Rosenzweig02f503e2019-12-30 18:53:04 -05002703 emit_mir_instruction(ctx, ins);
2704
Alyssa Rosenzweig3448b262019-12-03 10:37:01 -05002705 ctx->current_block->epilogue = true;
Alyssa Rosenzweig60396342019-11-23 16:08:02 -05002706 schedule_barrier(ctx);
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05002707 return ins.branch.target_block;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002708}
2709
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002710static midgard_block *
Icecream95ed4d2732020-07-08 13:15:09 +12002711emit_block_init(compiler_context *ctx)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002712{
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002713 midgard_block *this_block = ctx->after_block;
2714 ctx->after_block = NULL;
2715
2716 if (!this_block)
Alyssa Rosenzweigaeeeef12019-08-15 08:11:10 -07002717 this_block = create_empty_block(ctx);
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002718
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04002719 list_addtail(&this_block->base.link, &ctx->blocks);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002720
Alyssa Rosenzweigc5dd1d52020-03-11 08:22:08 -04002721 this_block->scheduled = false;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002722 ++ctx->block_count;
2723
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002724 /* Set up current block */
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04002725 list_inithead(&this_block->base.instructions);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002726 ctx->current_block = this_block;
2727
Icecream95ed4d2732020-07-08 13:15:09 +12002728 return this_block;
2729}
2730
2731static midgard_block *
2732emit_block(compiler_context *ctx, nir_block *block)
2733{
2734 midgard_block *this_block = emit_block_init(ctx);
2735
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002736 nir_foreach_instr(instr, block) {
2737 emit_instr(ctx, instr);
2738 ++ctx->instruction_count;
2739 }
2740
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002741 return this_block;
2742}
2743
2744static midgard_block *emit_cf_list(struct compiler_context *ctx, struct exec_list *list);
2745
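/* Emit a NIR if-statement: a speculative conditional branch, the then
 * block, an unconditional jump over the else block (removed again if the
 * else turns out to be empty), the else block, and finally the successor
 * wiring through a fresh after-block. */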
2746static void
2747emit_if(struct compiler_context *ctx, nir_if *nif)
2748{
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002749 midgard_block *before_block = ctx->current_block;
2750
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002751 /* Speculatively emit the branch, but we can't fill it in until later */
Alyssa Rosenzweigdb7b0eb2020-04-30 14:17:06 -04002752 bool inv = false;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002753 EMIT(branch, true, true);
2754 midgard_instruction *then_branch = mir_last_in_block(ctx->current_block);
Alyssa Rosenzweigdb7b0eb2020-04-30 14:17:06 -04002755 then_branch->src[0] = mir_get_branch_cond(&nif->condition, &inv);
Alyssa Rosenzweig074815c2020-04-29 16:29:01 -04002756 then_branch->src_types[0] = nir_type_uint32;
Alyssa Rosenzweigdb7b0eb2020-04-30 14:17:06 -04002757 then_branch->branch.invert_conditional = !inv;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002758
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002759 /* Emit the two subblocks. */
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002760 midgard_block *then_block = emit_cf_list(ctx, &nif->then_list);
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002761 midgard_block *end_then_block = ctx->current_block;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002762
2763 /* Emit a jump from the end of the then block to the end of the else */
2764 EMIT(branch, false, false);
2765 midgard_instruction *then_exit = mir_last_in_block(ctx->current_block);
2766
2767 /* Emit second block, and check if it's empty */
2768
2769 int else_idx = ctx->block_count;
2770 int count_in = ctx->instruction_count;
2771 midgard_block *else_block = emit_cf_list(ctx, &nif->else_list);
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002772 midgard_block *end_else_block = ctx->current_block;
Alyssa Rosenzweig2c747092019-02-17 05:14:24 +00002773 int after_else_idx = ctx->block_count;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002774
2775 /* Now that we have the subblocks emitted, fix up the branches */
2776
2777 assert(then_block);
2778 assert(else_block);
2779
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002780 if (ctx->instruction_count == count_in) {
2781 /* The else block is empty, so don't emit an exit jump */
2782 mir_remove_instruction(then_exit);
Alyssa Rosenzweig2c747092019-02-17 05:14:24 +00002783 then_branch->branch.target_block = after_else_idx;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002784 } else {
2785 then_branch->branch.target_block = else_idx;
Alyssa Rosenzweig2c747092019-02-17 05:14:24 +00002786 then_exit->branch.target_block = after_else_idx;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002787 }
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002788
2789 /* Wire up the successors */
2790
Alyssa Rosenzweigaeeeef12019-08-15 08:11:10 -07002791 ctx->after_block = create_empty_block(ctx);
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002792
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04002793 pan_block_add_successor(&before_block->base, &then_block->base);
2794 pan_block_add_successor(&before_block->base, &else_block->base);
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002795
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04002796 pan_block_add_successor(&end_then_block->base, &ctx->after_block->base);
2797 pan_block_add_successor(&end_else_block->base, &ctx->after_block->base);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002798}
2799
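/* Emit a NIR loop: the body followed by an unconditional branch back to the
 * top, then rewrite any TARGET_BREAK branches emitted inside the body into
 * gotos targeting the block that follows the loop. */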
2800static void
2801emit_loop(struct compiler_context *ctx, nir_loop *nloop)
2802{
2803 /* Remember where we are */
2804 midgard_block *start_block = ctx->current_block;
2805
Alyssa Rosenzweig521ac6e2019-04-21 16:22:44 +00002806 /* Allocate a loop number, growing the current inner loop depth */
2807 int loop_idx = ++ctx->current_loop_depth;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002808
2809 /* Get index from before the body so we can loop back later */
2810 int start_idx = ctx->block_count;
2811
2812 /* Emit the body itself */
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002813 midgard_block *loop_block = emit_cf_list(ctx, &nloop->body);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002814
2815        /* Branch back to the top of the loop */
2816 struct midgard_instruction br_back = v_branch(false, false);
2817 br_back.branch.target_block = start_idx;
2818 emit_mir_instruction(ctx, br_back);
2819
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002820 /* Mark down that branch in the graph. */
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04002821 pan_block_add_successor(&start_block->base, &loop_block->base);
2822 pan_block_add_successor(&ctx->current_block->base, &loop_block->base);
Alyssa Rosenzweigc0fb2602019-04-21 03:29:47 +00002823
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002824 /* Find the index of the block about to follow us (note: we don't add
2825         * one; blocks are 0-indexed, so adding one would introduce a fencepost error) */
2826 int break_block_idx = ctx->block_count;
2827
2828 /* Fix up the break statements we emitted to point to the right place,
2829 * now that we can allocate a block number for them */
Alyssa Rosenzweigaeeeef12019-08-15 08:11:10 -07002830 ctx->after_block = create_empty_block(ctx);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002831
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04002832 mir_foreach_block_from(ctx, start_block, _block) {
2833 mir_foreach_instr_in_block(((midgard_block *) _block), ins) {
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002834 if (ins->type != TAG_ALU_4) continue;
2835 if (!ins->compact_branch) continue;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002836
2837 /* We found a branch -- check the type to see if we need to do anything */
2838 if (ins->branch.target_type != TARGET_BREAK) continue;
2839
2840 /* It's a break! Check if it's our break */
2841 if (ins->branch.target_break != loop_idx) continue;
2842
2843 /* Okay, cool, we're breaking out of this loop.
2844 * Rewrite from a break to a goto */
2845
2846 ins->branch.target_type = TARGET_GOTO;
2847 ins->branch.target_block = break_block_idx;
Alyssa Rosenzweig9aeb7262019-08-02 13:48:27 -07002848
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04002849 pan_block_add_successor(_block, &ctx->after_block->base);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002850 }
2851 }
Alyssa Rosenzweig521ac6e2019-04-21 16:22:44 +00002852
2853 /* Now that we've finished emitting the loop, free up the depth again
2854 * so we play nice with recursion amid nested loops */
2855 --ctx->current_loop_depth;
Alyssa Rosenzweig7ad65162019-07-09 11:10:49 -07002856
2857 /* Dump loop stats */
2858 ++ctx->loop_count;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002859}
2860
2861static midgard_block *
2862emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
2863{
2864 midgard_block *start_block = NULL;
2865
2866 foreach_list_typed(nir_cf_node, node, node, list) {
2867 switch (node->type) {
2868 case nir_cf_node_block: {
2869 midgard_block *block = emit_block(ctx, nir_cf_node_as_block(node));
2870
2871 if (!start_block)
2872 start_block = block;
2873
2874 break;
2875 }
2876
2877 case nir_cf_node_if:
2878 emit_if(ctx, nir_cf_node_as_if(node));
2879 break;
2880
2881 case nir_cf_node_loop:
2882 emit_loop(ctx, nir_cf_node_as_loop(node));
2883 break;
2884
2885 case nir_cf_node_function:
2886 assert(0);
2887 break;
2888 }
2889 }
2890
2891 return start_block;
2892}
2893
Alyssa Rosenzweig5e55c112019-02-17 03:35:03 +00002894/* Due to lookahead, we need to report the first tag executed in the command
2895 * stream and in branch targets. An initial block might be empty, so iterate
2896 * until we find one that 'works' */
2897
Italo Nicola8150c1d2020-07-29 20:14:55 +00002898unsigned
Alyssa Rosenzweig5e55c112019-02-17 03:35:03 +00002899midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx)
2900{
2901 midgard_block *initial_block = mir_get_block(ctx, block_idx);
2902
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04002903 mir_foreach_block_from(ctx, initial_block, _v) {
2904 midgard_block *v = (midgard_block *) _v;
Alyssa Rosenzweig45ac8ea2019-11-04 10:32:49 -05002905 if (v->quadword_count) {
2906 midgard_bundle *initial_bundle =
2907 util_dynarray_element(&v->bundles, midgard_bundle, 0);
Alyssa Rosenzweig5e55c112019-02-17 03:35:03 +00002908
Alyssa Rosenzweiga55a2e022020-02-04 09:28:06 -05002909 return initial_bundle->tag;
Alyssa Rosenzweig5e55c112019-02-17 03:35:03 +00002910 }
Alyssa Rosenzweig73c40d62019-07-31 15:49:30 -07002911 }
Alyssa Rosenzweig5e55c112019-02-17 03:35:03 +00002912
Alyssa Rosenzweiga55a2e022020-02-04 09:28:06 -05002913        /* Default to tag 1, which will break from the shader, in case we jump
2914 * to the exit block (i.e. `return` in a compute shader) */
2915
2916 return 1;
Alyssa Rosenzweig5e55c112019-02-17 03:35:03 +00002917}
2918
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05002919/* For each fragment writeout instruction, generate a writeout loop to
2920 * associate with it */
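/* Schematically (illustrative): the writeout branch for RT n is retargeted
 * at a fresh self-looping epilogue block; if another RT follows, an
 * unconditional goto jumps back to the block the writeout branch originally
 * targeted so the remaining render targets still execute. */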
2921
2922static void
2923mir_add_writeout_loops(compiler_context *ctx)
2924{
2925 for (unsigned rt = 0; rt < ARRAY_SIZE(ctx->writeout_branch); ++rt) {
2926 midgard_instruction *br = ctx->writeout_branch[rt];
2927 if (!br) continue;
2928
2929 unsigned popped = br->branch.target_block;
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04002930 pan_block_add_successor(&(mir_get_block(ctx, popped - 1)->base), &ctx->current_block->base);
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05002931 br->branch.target_block = emit_fragment_epilogue(ctx, rt);
Alyssa Rosenzweige27fd4b2020-04-27 20:34:36 -04002932 br->branch.target_type = TARGET_GOTO;
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05002933
2934 /* If we have more RTs, we'll need to restore back after our
2935 * loop terminates */
2936
2937 if ((rt + 1) < ARRAY_SIZE(ctx->writeout_branch) && ctx->writeout_branch[rt + 1]) {
2938 midgard_instruction uncond = v_branch(false, false);
2939 uncond.branch.target_block = popped;
Alyssa Rosenzweige27fd4b2020-04-27 20:34:36 -04002940 uncond.branch.target_type = TARGET_GOTO;
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05002941 emit_mir_instruction(ctx, uncond);
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04002942 pan_block_add_successor(&ctx->current_block->base, &(mir_get_block(ctx, popped)->base));
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05002943 schedule_barrier(ctx);
2944 } else {
2945 /* We're last, so we can terminate here */
2946 br->last_writeout = true;
2947 }
2948 }
2949}
2950
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002951int
Boris Brezillon0a74a042020-10-08 10:09:56 +02002952midgard_compile_shader_nir(nir_shader *nir, panfrost_program *program,
2953 const struct panfrost_compile_inputs *inputs)
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002954{
2955 struct util_dynarray *compiled = &program->compiled;
2956
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07002957 midgard_debug = debug_get_option_midgard_debug();
Tomeu Vizosof0b1bbe2019-03-08 15:04:50 +01002958
Alyssa Rosenzweig4fa09322019-08-15 08:10:46 -07002959 /* TODO: Bound against what? */
2960 compiler_context *ctx = rzalloc(NULL, compiler_context);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002961
Alyssa Rosenzweig4fa09322019-08-15 08:10:46 -07002962 ctx->nir = nir;
Alyssa Rosenzweig4fa09322019-08-15 08:10:46 -07002963 ctx->stage = nir->info.stage;
Boris Brezillon0a74a042020-10-08 10:09:56 +02002964 ctx->is_blend = inputs->is_blend;
2965 ctx->blend_rt = MIDGARD_COLOR_RT0 + inputs->blend.rt;
Alyssa Rosenzweig277b6162020-06-12 16:45:24 -04002966 ctx->blend_input = ~0;
Icecream9585954ec2020-06-25 22:21:50 +12002967 ctx->blend_src1 = ~0;
Boris Brezillon0a74a042020-10-08 10:09:56 +02002968 ctx->quirks = midgard_get_quirks(inputs->gpu_id);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002969
Alyssa Rosenzweig3174bc92019-07-16 14:10:08 -07002970 /* Start off with a safe cutoff, allowing usage of all 16 work
2971 * registers. Later, we'll promote uniform reads to uniform registers
2972 * if we determine it is beneficial to do so */
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002973 ctx->uniform_cutoff = 8;
2974
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002975 /* Initialize at a global (not block) level hash tables */
2976
2977 ctx->ssa_constants = _mesa_hash_table_u64_create(NULL);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002978
Alyssa Rosenzweigde8d49a2019-06-06 09:15:26 -07002979 /* Lower gl_Position pre-optimisation, but after lowering vars to ssa
2980 * (so we don't accidentally duplicate the epilogue since mesa/st has
2981 * messed with our I/O quite a bit already) */
2982
2983 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
Alyssa Rosenzweig1e2cb3e2019-04-07 16:37:28 +00002984
Alyssa Rosenzweigbb483a92019-07-10 11:30:00 -07002985 if (ctx->stage == MESA_SHADER_VERTEX) {
Alyssa Rosenzweig1e2cb3e2019-04-07 16:37:28 +00002986 NIR_PASS_V(nir, nir_lower_viewport_transform);
Alyssa Rosenzweig20237162019-08-26 12:14:11 -07002987 NIR_PASS_V(nir, nir_lower_point_size, 1.0, 1024.0);
Alyssa Rosenzweigbb483a92019-07-10 11:30:00 -07002988 }
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00002989
2990 NIR_PASS_V(nir, nir_lower_var_copies);
2991 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2992 NIR_PASS_V(nir, nir_split_var_copies);
2993 NIR_PASS_V(nir, nir_lower_var_copies);
2994 NIR_PASS_V(nir, nir_lower_global_vars_to_local);
2995 NIR_PASS_V(nir, nir_lower_var_copies);
2996 NIR_PASS_V(nir, nir_lower_vars_to_ssa);
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00002997
Boris Brezillon0a74a042020-10-08 10:09:56 +02002998 unsigned pan_quirks = panfrost_get_quirks(inputs->gpu_id);
Icecream951e1eee92020-07-06 19:30:37 +12002999 NIR_PASS_V(nir, pan_lower_framebuffer,
Boris Brezillon0a74a042020-10-08 10:09:56 +02003000 inputs->rt_formats, inputs->is_blend, pan_quirks);
Icecream951e1eee92020-07-06 19:30:37 +12003001
Jason Ekstrandb019b222020-06-10 17:54:25 -05003002 NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
3003 glsl_type_size, 0);
Alyssa Rosenzweig31489372019-11-05 08:59:49 -05003004 NIR_PASS_V(nir, nir_lower_ssbo);
Icecream95d37e9012020-06-06 17:25:08 +12003005 NIR_PASS_V(nir, midgard_nir_lower_zs_store);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003006
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003007 /* Optimisation passes */
3008
Boris Brezillon0a74a042020-10-08 10:09:56 +02003009 optimise_nir(nir, ctx->quirks, inputs->is_blend);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003010
Icecream950ff62632020-07-06 23:52:40 +12003011 NIR_PASS_V(nir, midgard_nir_reorder_writeout);
3012
Icecream95756441b2020-09-26 12:19:14 +12003013 if ((midgard_debug & MIDGARD_DBG_SHADERS) && !nir->info.internal) {
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07003014 nir_print_shader(nir, stdout);
3015 }
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003016
Alyssa Rosenzweig7e8de5a2019-04-03 01:48:09 +00003017 /* Assign sysvals and counts, now that we're sure
3018 * (post-optimisation) */
3019
Alyssa Rosenzweig680fb052020-08-18 08:31:42 -04003020 panfrost_nir_assign_sysvals(&ctx->sysvals, ctx, nir);
Alyssa Rosenzweigc2ff3bb2020-03-10 16:00:56 -04003021 program->sysval_count = ctx->sysvals.sysval_count;
3022 memcpy(program->sysvals, ctx->sysvals.sysvals, sizeof(ctx->sysvals.sysvals[0]) * ctx->sysvals.sysval_count);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003023
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003024 nir_foreach_function(func, nir) {
3025 if (!func->impl)
3026 continue;
3027
3028 list_inithead(&ctx->blocks);
3029 ctx->block_count = 0;
3030 ctx->func = func;
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04003031 ctx->already_emitted = calloc(BITSET_WORDS(func->impl->ssa_alloc), sizeof(BITSET_WORD));
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003032
Boris Brezillon0a74a042020-10-08 10:09:56 +02003033 if (nir->info.outputs_read && !inputs->is_blend) {
Icecream95ed4d2732020-07-08 13:15:09 +12003034 emit_block_init(ctx);
3035
3036 struct midgard_instruction wait = v_branch(false, false);
3037 wait.branch.target_type = TARGET_TILEBUF_WAIT;
3038
3039 emit_mir_instruction(ctx, wait);
3040
3041 ++ctx->instruction_count;
3042 }
3043
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003044 emit_cf_list(ctx, &func->impl->body);
Alyssa Rosenzweig22bb5a92020-04-29 18:08:26 -04003045 free(ctx->already_emitted);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003046 break; /* TODO: Multi-function shaders */
3047 }
3048
3049 util_dynarray_init(compiled, NULL);
3050
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -07003051 /* Per-block lowering before opts */
3052
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04003053 mir_foreach_block(ctx, _block) {
3054 midgard_block *block = (midgard_block *) _block;
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -07003055 inline_alu_constants(ctx, block);
Alyssa Rosenzweigcc2ba8e2019-08-30 10:53:13 -07003056 embedded_to_inline_constant(ctx, block);
3057 }
Alyssa Rosenzweig4d995e02019-04-22 04:58:53 +00003058 /* MIR-level optimizations */
Alyssa Rosenzweig84f09ff2019-04-21 16:11:11 +00003059
Alyssa Rosenzweig4d995e02019-04-22 04:58:53 +00003060 bool progress = false;
3061
3062 do {
3063 progress = false;
Alyssa Rosenzweigfc06b8b2020-05-06 17:34:09 -04003064 progress |= midgard_opt_dead_code_eliminate(ctx);
Alyssa Rosenzweig4d995e02019-04-22 04:58:53 +00003065
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04003066 mir_foreach_block(ctx, _block) {
3067 midgard_block *block = (midgard_block *) _block;
Alyssa Rosenzweig4d995e02019-04-22 04:58:53 +00003068 progress |= midgard_opt_copy_prop(ctx, block);
Alyssa Rosenzweig9ce75822019-07-24 15:37:24 -07003069 progress |= midgard_opt_combine_projection(ctx, block);
3070 progress |= midgard_opt_varying_projection(ctx, block);
Alyssa Rosenzweig4d995e02019-04-22 04:58:53 +00003071 }
3072 } while (progress);
Alyssa Rosenzweig84f09ff2019-04-21 16:11:11 +00003073
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04003074 mir_foreach_block(ctx, _block) {
3075 midgard_block *block = (midgard_block *) _block;
Alyssa Rosenzweig8f887322019-07-29 15:11:12 -07003076 midgard_lower_derivatives(ctx, block);
Alyssa Rosenzweig622e3a82020-06-02 12:15:18 -04003077 midgard_legalize_invert(ctx, block);
Alyssa Rosenzweig1c2d4692020-04-30 13:13:24 -04003078 midgard_cull_dead_branch(ctx, block);
Alyssa Rosenzweigae20bee2019-06-06 11:19:13 -07003079 }
3080
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05003081 if (ctx->stage == MESA_SHADER_FRAGMENT)
3082 mir_add_writeout_loops(ctx);
3083
Alyssa Rosenzweig9a7f0e22020-05-12 13:26:32 -04003084 /* Analyze now that the code is known but before scheduling creates
3085 * pipeline registers which are harder to track */
3086 mir_analyze_helper_terminate(ctx);
3087 mir_analyze_helper_requirements(ctx);
3088
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003089 /* Schedule! */
Robert Foss62adb652020-01-15 01:14:16 +01003090 midgard_schedule_program(ctx);
Alyssa Rosenzweig9dc3b182019-12-06 09:32:38 -05003091 mir_ra(ctx);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003092
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003093 /* Emit flat binary from the instruction arrays. Iterate each block in
3094 * sequence. Save instruction boundaries such that lookahead tags can
3095 * be assigned easily */
3096
3097 /* Cache _all_ bundles in source order for lookahead across failed branches */
3098
3099 int bundle_count = 0;
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04003100 mir_foreach_block(ctx, _block) {
3101 midgard_block *block = (midgard_block *) _block;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003102 bundle_count += block->bundles.size / sizeof(midgard_bundle);
3103 }
3104 midgard_bundle **source_order_bundles = malloc(sizeof(midgard_bundle *) * bundle_count);
3105 int bundle_idx = 0;
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04003106 mir_foreach_block(ctx, _block) {
3107 midgard_block *block = (midgard_block *) _block;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003108 util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
3109 source_order_bundles[bundle_idx++] = bundle;
3110 }
3111 }
3112
3113 int current_bundle = 0;
3114
Alyssa Rosenzweig2a79afc2019-05-23 01:56:03 +00003115 /* Midgard prefetches instruction types, so during emission we
3116         * need to look ahead. Unless this is the last instruction, in
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05003117         * which case we return 1. */
Alyssa Rosenzweig2a79afc2019-05-23 01:56:03 +00003118
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04003119 mir_foreach_block(ctx, _block) {
3120 midgard_block *block = (midgard_block *) _block;
Alyssa Rosenzweigd3ad8d62019-06-06 11:19:44 -07003121 mir_foreach_bundle_in_block(block, bundle) {
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003122 int lookahead = 1;
3123
Alyssa Rosenzweig5bc62af2020-01-02 12:27:59 -05003124 if (!bundle->last_writeout && (current_bundle + 1 < bundle_count))
3125 lookahead = source_order_bundles[current_bundle + 1]->tag;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003126
Alyssa Rosenzweig30a393f2020-05-21 19:14:23 -04003127 emit_binary_bundle(ctx, block, bundle, compiled, lookahead);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003128 ++current_bundle;
3129 }
3130
3131 /* TODO: Free deeper */
3132 //util_dynarray_fini(&block->instructions);
3133 }
3134
3135 free(source_order_bundles);
3136
Alyssa Rosenzweig5e55c112019-02-17 03:35:03 +00003137 /* Report the very first tag executed */
3138 program->first_tag = midgard_get_first_tag_from_block(ctx, 0);
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003139
3140 /* Deal with off-by-one related to the fencepost problem */
3141 program->work_register_count = ctx->work_registers + 1;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003142 program->uniform_cutoff = ctx->uniform_cutoff;
3143
3144 program->blend_patch_offset = ctx->blend_constant_offset;
Alyssa Rosenzweigf0d00612019-07-19 16:23:52 -07003145 program->tls_size = ctx->tls_size;
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003146
Boris Brezillon0a74a042020-10-08 10:09:56 +02003147 if ((midgard_debug & MIDGARD_DBG_SHADERS) && !nir->info.internal) {
3148 disassemble_midgard(stdout,
3149 program->compiled.data,
3150 program->compiled.size,
3151 inputs->gpu_id, ctx->stage);
3152 }
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003153
Boris Brezillon0a74a042020-10-08 10:09:56 +02003154 if ((midgard_debug & MIDGARD_DBG_SHADERDB || inputs->shaderdb) &&
3155 !nir->info.internal) {
Alyssa Rosenzweig19bceb52019-08-30 13:57:20 -07003156 unsigned nr_bundles = 0, nr_ins = 0;
Alyssa Rosenzweig138e40d2019-07-08 16:42:29 -07003157
3158 /* Count instructions and bundles */
3159
Alyssa Rosenzweig5aaaf7b2020-03-11 08:36:31 -04003160 mir_foreach_block(ctx, _block) {
3161 midgard_block *block = (midgard_block *) _block;
Alyssa Rosenzweig138e40d2019-07-08 16:42:29 -07003162 nr_bundles += util_dynarray_num_elements(
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07003163 &block->bundles, midgard_bundle);
Alyssa Rosenzweig2d739f62019-07-09 11:16:57 -07003164
Alyssa Rosenzweig67909c82019-08-30 13:08:16 -07003165 mir_foreach_bundle_in_block(block, bun)
3166 nr_ins += bun->instruction_count;
Alyssa Rosenzweig138e40d2019-07-08 16:42:29 -07003167 }
3168
3169                /* Calculate thread count. The hardware caps the number of
3170                 * concurrent threads based on work register usage */
3171
3172 unsigned nr_registers = program->work_register_count;
3173
3174 unsigned nr_threads =
3175 (nr_registers <= 4) ? 4 :
3176 (nr_registers <= 8) ? 2 :
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07003177 1;
Alyssa Rosenzweig138e40d2019-07-08 16:42:29 -07003178
3179 /* Dump stats */
3180
3181 fprintf(stderr, "shader%d - %s shader: "
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07003182 "%u inst, %u bundles, %u quadwords, "
Alyssa Rosenzweige8dca7e2019-07-22 06:32:48 -07003183 "%u registers, %u threads, %u loops, "
Alyssa Rosenzweig1a4153b2019-08-30 17:29:17 -07003184 "%u:%u spills:fills\n",
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07003185 SHADER_DB_COUNT++,
Alyssa Rosenzweig014d2e42020-05-25 13:19:43 -04003186 ctx->is_blend ? "PAN_SHADER_BLEND" :
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07003187 gl_shader_stage_name(ctx->stage),
Alyssa Rosenzweig19bceb52019-08-30 13:57:20 -07003188 nr_ins, nr_bundles, ctx->quadword_count,
Alyssa Rosenzweige4bd6fb2019-07-10 10:00:50 -07003189 nr_registers, nr_threads,
Alyssa Rosenzweige8dca7e2019-07-22 06:32:48 -07003190 ctx->loop_count,
3191 ctx->spills, ctx->fills);
Alyssa Rosenzweig138e40d2019-07-08 16:42:29 -07003192 }
3193
Alyssa Rosenzweig4fa09322019-08-15 08:10:46 -07003194 ralloc_free(ctx);
Alyssa Rosenzweig138e40d2019-07-08 16:42:29 -07003195
Alyssa Rosenzweige67e0722019-01-30 01:11:31 +00003196 return 0;
3197}