[AMDGPU] Fixed incorrect uniform branch condition
Summary:
I had a case where multiple nested uniform ifs resulted in code that did
v_cmp comparisons, combined the results with s_and_b64, s_or_b64 and
s_xor_b64, and used the resulting mask in s_cbranch_vccnz without first
ensuring that the bits for inactive lanes were clear.
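As a rough illustration (the register numbers and branch target below are
made up, not taken from the failing case), the bad pattern looks like:

  v_cmp_lt_f32_e64 s[0:1], v0, v1    ; per-lane compare, 0 for inactive lanes
  s_xor_b64        vcc, s[0:1], -1   ; negation sets bits for inactive lanes
  s_cbranch_vccnz  BB0_2             ; can be taken purely on inactive-lane bits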
There was already code to insert an "s_and_b64 vcc, exec, vcc" to clear
the bits for inactive lanes in the case where the branch is
instruction-selected as s_cbranch_scc1 and is then changed to
s_cbranch_vccnz in SIFixSGPRCopies. I have added the same code to
SILowerControlFlow for the case where the branch is instruction-selected
as s_cbranch_vccnz directly.
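With this change the mask is cleaned up before the branch, along the lines
of (again just a sketch with made-up operands):

  s_xor_b64        vcc, s[0:1], -1
  s_and_b64        vcc, exec, vcc    ; clear bits for inactive lanes
  s_cbranch_vccnz  BB0_2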
This de-optimizes the code in some cases where the s_and is not needed
because vcc is already the result of a v_cmp, or of multiple v_cmp
instructions combined by s_and/s_or. We should add a pass to re-optimize
those cases.
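As an example of such a case (sketch, made-up operands), a branch whose
condition comes straight from a v_cmp into vcc now gets a redundant s_and:

  v_cmp_eq_u32_e32 vcc, 0, v0        ; inactive-lane bits are already clear
  s_and_b64        vcc, exec, vcc    ; redundant here, a later pass could drop it
  s_cbranch_vccnz  BB1_3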
Reviewers: arsenm, kzhuravl
Subscribers: wdng, yaxunl, t-tye, llvm-commits, dstuttard, timcorringham, nhaehnle
Differential Revision: https://reviews.llvm.org/D41292
llvm-svn: 322119
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
index 023baf1..ba632f9 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -100,7 +100,8 @@
; GCN-LABEL: {{^}}uniform_conditional_min_long_forward_vcnd_branch:
; GCN: s_load_dword [[CND:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 [[V_CND:v[0-9]+]], [[CND]]
-; GCN-DAG: v_cmp_eq_f32_e64 vcc, [[CND]], 0
+; GCN-DAG: v_cmp_eq_f32_e64 [[UNMASKED:s\[[0-9]+:[0-9]+\]]], [[CND]], 0
+; GCN-DAG: s_and_b64 vcc, exec, [[UNMASKED]]
; GCN: s_cbranch_vccz [[LONGBB:BB[0-9]+_[0-9]+]]
; GCN-NEXT: [[LONG_JUMP:BB[0-9]+_[0-9]+]]: ; %bb0
@@ -500,8 +501,7 @@
; GCN: s_setpc_b64
; GCN: [[LONG_BR_DEST0]]
-; GCN: v_cmp_ne_u32_e32
-; GCN-NEXT: s_cbranch_vccz
+; GCN: s_cbranch_vccz
; GCN: s_setpc_b64
; GCN: s_endpgm
@@ -520,6 +520,11 @@
br i1 %tmp12, label %bb19, label %bb14
bb13: ; preds = %bb
+ call void asm sideeffect
+ "v_nop_e64
+ v_nop_e64
+ v_nop_e64
+ v_nop_e64", ""() #0
br i1 %tmp6, label %bb19, label %bb14
bb14: ; preds = %bb13, %bb9
diff --git a/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll b/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
index 1e0af26..1e04544 100644
--- a/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
+++ b/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
@@ -95,7 +95,7 @@
; GCN-LABEL: {{^}}loop_arg_0:
; GCN: v_and_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
-; GCN: v_cmp_eq_u32_e32 vcc, 1,
+; GCN: v_cmp_eq_u32{{[^,]*}}, 1,
; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]
; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80
diff --git a/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll b/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
index 96d2841..ce2e868 100644
--- a/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
+++ b/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
@@ -63,8 +63,7 @@
; GCN-NEXT: s_cbranch_scc1
; FIXME: Should fold to unconditional branch?
-; GCN: s_mov_b64 vcc, -1
-; GCN-NEXT: ; implicit-def
+; GCN: ; implicit-def
; GCN: s_cbranch_vccz
; GCN: ds_read_b32
diff --git a/llvm/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll b/llvm/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll
new file mode 100644
index 0000000..70ee24f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=amdgcn -mcpu=gfx600 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx800 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
+
+; This checks for a bug where uniform control flow can result in multiple
+; v_cmp results being combined together with s_and_b64, s_or_b64 and s_xor_b64,
+; and the resulting mask being used in s_cbranch_vccnz without first ensuring
+; that the mask has its bits clear for inactive lanes.
+; The problematic case is s_xor_b64, as, unlike the other ops, it can actually
+; set bits for inactive lanes.
+;
+; The check for an s_xor_b64 is just to check that this test tests what it is
+; supposed to test. If the s_xor_b64 disappears due to some other change, it
+; does not necessarily mean that the bug has reappeared.
+;
+; The check for "s_and_b64 vcc, exec, something" checks that the bug is fixed.
+
+; CHECK: {{^}}main:
+; CHECK: s_xor_b64
+; CHECK: s_and_b64 vcc, exec,
+
+define amdgpu_cs void @main(i32 inreg %arg) {
+.entry:
+ %tmp44 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
+ %tmp16 = load volatile float, float addrspace(1)* undef
+ %tmp22 = load volatile float, float addrspace(1)* undef
+ %tmp25 = load volatile float, float addrspace(1)* undef
+ %tmp31 = fcmp olt float %tmp16, 0x3FA99999A0000000
+ br i1 %tmp31, label %bb, label %.exit.thread
+
+bb: ; preds = %.entry
+ %tmp42 = fcmp olt float %tmp25, 0x3FA99999A0000000
+ br i1 %tmp42, label %bb43, label %.exit.thread
+
+bb43:
+ %tmp46 = fcmp olt <2 x float> %tmp44, <float 0x3FA99999A0000000, float 0x3FA99999A0000000>
+ %tmp47 = extractelement <2 x i1> %tmp46, i32 0
+ %tmp48 = extractelement <2 x i1> %tmp46, i32 1
+ %tmp49 = and i1 %tmp47, %tmp48
+ br i1 %tmp49, label %bb50, label %.exit.thread
+
+bb50:
+ %tmp53 = fcmp olt float %tmp22, 0x3FA99999A0000000
+ br i1 %tmp53, label %.exit3.i, label %.exit.thread
+
+.exit3.i:
+ store volatile i32 0, i32 addrspace(1)* undef
+ br label %.exit.thread
+
+.exit.thread:
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/AMDGPU/select-opt.ll b/llvm/test/CodeGen/AMDGPU/select-opt.ll
index d56b952..540eb9c 100644
--- a/llvm/test/CodeGen/AMDGPU/select-opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-opt.ll
@@ -134,8 +134,8 @@
}
; GCN-LABEL: {{^}}regression:
-; GCN: v_cmp_neq_f32_e64 vcc
-; GCN: v_cmp_neq_f32_e64 vcc, s{{[0-9]+}}, 0
+; GCN: v_cmp_neq_f32_e64
+; GCN: v_cmp_neq_f32_e64 {{[^,]*}}, s{{[0-9]+}}, 0
; GCN: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
define amdgpu_kernel void @regression(float addrspace(1)* %out, float %c0, float %c1) #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
index 9ae36b0..54fa93a 100644
--- a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -267,7 +267,7 @@
; CHECK: [[PHIBB]]:
; CHECK: v_cmp_eq_f32_e32 vcc, 0, [[PHIREG]]
-; CHECK-NEXT: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]
+; CHECK: s_cbranch_vccz [[ENDBB:BB[0-9]+_[0-9]+]]
; CHECK: ; %bb10
; CHECK: v_mov_b32_e32 v{{[0-9]+}}, 9
@@ -302,14 +302,14 @@
; CHECK-LABEL: {{^}}no_skip_no_successors:
; CHECK: v_cmp_nge_f32
-; CHECK-NEXT: s_cbranch_vccz [[SKIPKILL:BB[0-9]+_[0-9]+]]
+; CHECK: s_cbranch_vccz [[SKIPKILL:BB[0-9]+_[0-9]+]]
; CHECK: ; %bb6
; CHECK: s_mov_b64 exec, 0
; CHECK: [[SKIPKILL]]:
; CHECK: v_cmp_nge_f32_e32 vcc
-; CHECK-NEXT: %bb.3: ; %bb5
+; CHECK: %bb.3: ; %bb5
; CHECK-NEXT: .Lfunc_end{{[0-9]+}}
define amdgpu_ps void @no_skip_no_successors(float inreg %arg, float inreg %arg1) #0 {
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/smrd-vccz-bug.ll b/llvm/test/CodeGen/AMDGPU/smrd-vccz-bug.ll
index 333113e..0eaa28b 100644
--- a/llvm/test/CodeGen/AMDGPU/smrd-vccz-bug.ll
+++ b/llvm/test/CodeGen/AMDGPU/smrd-vccz-bug.ll
@@ -4,7 +4,7 @@
; GCN-FUNC: {{^}}vccz_workaround:
; GCN: s_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0x0
-; GCN: v_cmp_neq_f32_e64 vcc, s{{[0-9]+}}, 0{{$}}
+; GCN: v_cmp_neq_f32_e64 {{[^,]*}}, s{{[0-9]+}}, 0{{$}}
; VCCZ-BUG: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VCCZ-BUG: s_mov_b64 vcc, vcc
; NOVCCZ-BUG-NOT: s_mov_b64 vcc, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
index a247d7a..33a4202 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -251,7 +251,7 @@
; GCN: s_load_dword [[COND:s[0-9]+]]
; GCN: s_cmp_lt_i32 [[COND]], 1
; GCN: s_cbranch_scc1 [[EXIT:[A-Za-z0-9_]+]]
-; GCN: v_cmp_gt_i32_e64 vcc, [[COND]], 0{{$}}
+; GCN: v_cmp_gt_i32_e64 {{[^,]*}}, [[COND]], 0{{$}}
; GCN: s_cbranch_vccz [[BODY:[A-Za-z0-9_]+]]
; GCN: {{^}}[[EXIT]]:
; GCN: s_endpgm