R600/SI: Prefer selecting more e64 instruction forms.
Add some more tests to make sure better operand
choices are still made. Leave alone some cases that
seem to have no reason to ever use the e64 form.
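For context, a rough sketch of the operand constraints involved (not part
of this patch; register numbers are arbitrary): the 32-bit e32 encodings
only accept a VGPR in src1, while the VOP3 e64 encodings also allow an
SGPR there, e.g.

  V_AND_B32_e32 v0, 1, v1  ; e32: src1 must be a VGPR
  V_AND_B32_e64 v0, 1, s2  ; e64: an SGPR is allowed in src1

so selecting the e64 form for an SGPR operand can avoid first copying the
value into a VGPR.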
llvm-svn: 217789
diff --git a/llvm/test/CodeGen/R600/trunc.ll b/llvm/test/CodeGen/R600/trunc.ll
index 2f4b482..d3b191d 100644
--- a/llvm/test/CodeGen/R600/trunc.ll
+++ b/llvm/test/CodeGen/R600/trunc.ll
@@ -46,9 +46,20 @@
}
; SI-LABEL: @trunc_i32_to_i1:
-; SI: V_AND_B32
+; SI: V_AND_B32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; SI: V_CMP_EQ_I32
-define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
+define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
+ %a = load i32 addrspace(1)* %ptr, align 4
+ %trunc = trunc i32 %a to i1
+ %result = select i1 %trunc, i32 1, i32 0
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @sgpr_trunc_i32_to_i1:
+; SI: V_AND_B32_e64 v{{[0-9]+}}, 1, s{{[0-9]+}}
+; SI: V_CMP_EQ_I32
+define void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
%trunc = trunc i32 %a to i1
%result = select i1 %trunc, i32 1, i32 0
store i32 %result, i32 addrspace(1)* %out, align 4