R600/SI: Remove SIISelLowering::legalizeOperands()

Its functionality has been replaced by calling
SIInstrInfo::legalizeOperands() from
SIISelLowering::AdjustInstrPostInstrSelection() and running the
SIFoldOperands and SIShrinkInstructions passes.
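
A minimal sketch of the replacement call path, assuming the existing
TargetLowering hook and the SIInstrInfo::legalizeOperands() interface at
this revision (illustrative only; not the exact code added or touched by
this commit):

  // SIISelLowering.cpp (hypothetical sketch)
  void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
                                                       SDNode *Node) const {
    const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
        MI->getParent()->getParent()->getSubtarget().getInstrInfo());
    // Immediate folding and e64->e32 shrinking are handled later by the
    // SIFoldOperands and SIShrinkInstructions passes; here we only make
    // the selected instruction's operands legal for its encoding.
    TII->legalizeOperands(MI);
  }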

llvm-svn: 225445
diff --git a/llvm/test/CodeGen/R600/fneg.ll b/llvm/test/CodeGen/R600/fneg.ll
index ef912c3..ca3350d 100644
--- a/llvm/test/CodeGen/R600/fneg.ll
+++ b/llvm/test/CodeGen/R600/fneg.ll
@@ -48,7 +48,7 @@
 ; R600: -KC0[2].Z
 
 ; XXX: We could use v_add_f32_e64 with the negate bit here instead.
-; SI: v_sub_f32_e64 v{{[0-9]}}, 0.0, s{{[0-9]+$}}
+; SI: v_sub_f32_e64 v{{[0-9]}}, 0, s{{[0-9]+$}}
 define void @fneg_free_f32(float addrspace(1)* %out, i32 %in) {
   %bc = bitcast i32 %in to float
   %fsub = fsub float 0.0, %bc
diff --git a/llvm/test/CodeGen/R600/imm.ll b/llvm/test/CodeGen/R600/imm.ll
index 416462b..6e4fa3c 100644
--- a/llvm/test/CodeGen/R600/imm.ll
+++ b/llvm/test/CodeGen/R600/imm.ll
@@ -112,7 +112,7 @@
 
 ; CHECK-LABEL: {{^}}add_inline_imm_0.0_f32
 ; CHECK: s_load_dword [[VAL:s[0-9]+]]
-; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 0.0, [[VAL]]{{$}}
+; CHECK: v_add_f32_e64 [[REG:v[0-9]+]], 0, [[VAL]]{{$}}
 ; CHECK-NEXT: buffer_store_dword [[REG]]
 define void @add_inline_imm_0.0_f32(float addrspace(1)* %out, float %x) {
   %y = fadd float %x, 0.0
@@ -304,7 +304,7 @@
 
 ; CHECK-LABEL: {{^}}add_inline_imm_0.0_f64
 ; CHECK: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
-; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 0.0, [[VAL]]
+; CHECK: v_add_f64 [[REG:v\[[0-9]+:[0-9]+\]]], 0, [[VAL]]
 ; CHECK-NEXT: buffer_store_dwordx2 [[REG]]
 define void @add_inline_imm_0.0_f64(double addrspace(1)* %out, double %x) {
   %y = fadd double %x, 0.0
diff --git a/llvm/test/CodeGen/R600/operand-folding.ll b/llvm/test/CodeGen/R600/operand-folding.ll
index c430c5a..88a8145 100644
--- a/llvm/test/CodeGen/R600/operand-folding.ll
+++ b/llvm/test/CodeGen/R600/operand-folding.ll
@@ -87,6 +87,27 @@
   store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
+; CHECK-LABEL: {{^}}vector_imm:
+; CHECK: s_movk_i32 [[IMM:s[0-9]+]], 0x64
+; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
+; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
+; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
+; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
+
+define void @vector_imm(<4 x i32> addrspace(1)* %out) {
+entry:
+  %tmp0 = call i32 @llvm.r600.read.tidig.x()
+  %tmp1 = add i32 %tmp0, 1
+  %tmp2 = add i32 %tmp0, 2
+  %tmp3 = add i32 %tmp0, 3
+  %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
+  %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
+  %vec2 = insertelement <4 x i32> %vec1, i32 %tmp2, i32 2
+  %vec3 = insertelement <4 x i32> %vec2, i32 %tmp3, i32 3
+  %tmp4 = xor <4 x i32> <i32 100, i32 100, i32 100, i32 100>, %vec3
+  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %out
+  ret void
+}
 
 declare i32 @llvm.r600.read.tidig.x() #0
 attributes #0 = { readnone }
diff --git a/llvm/test/CodeGen/R600/seto.ll b/llvm/test/CodeGen/R600/seto.ll
index 0530cbd..c6265a4 100644
--- a/llvm/test/CodeGen/R600/seto.ll
+++ b/llvm/test/CodeGen/R600/seto.ll
@@ -2,7 +2,7 @@
 
 ; CHECK-LABEL: {{^}}main:
 ; CHECK: v_cmp_o_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[SREG:s[0-9]+]], [[SREG]]
-; CHECK-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0.0, 1.0, [[CMP]]
+; CHECK-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, 1.0, [[CMP]]
 define void @main(float %p) {
 main_body:
   %c = fcmp oeq float %p, %p
diff --git a/llvm/test/CodeGen/R600/setuo.ll b/llvm/test/CodeGen/R600/setuo.ll
index c13c7dc..f211309 100644
--- a/llvm/test/CodeGen/R600/setuo.ll
+++ b/llvm/test/CodeGen/R600/setuo.ll
@@ -2,7 +2,7 @@
 
 ; CHECK-LABEL: {{^}}main:
 ; CHECK: v_cmp_u_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[SREG:s[0-9]+]], [[SREG]]
-; CHECK-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0.0, 1.0, [[CMP]]
+; CHECK-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, 1.0, [[CMP]]
 define void @main(float %p) {
 main_body:
   %c = fcmp une float %p, %p
diff --git a/llvm/test/CodeGen/R600/sint_to_fp.f64.ll b/llvm/test/CodeGen/R600/sint_to_fp.f64.ll
index abce177..77844a6 100644
--- a/llvm/test/CodeGen/R600/sint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/R600/sint_to_fp.f64.ll
@@ -45,9 +45,9 @@
 
 ; SI-LABEL: @v_sint_to_fp_i64_to_f64
 ; SI: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
-; SI-DAG: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
-; SI-DAG: v_cvt_f64_i32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
+; SI: v_cvt_f64_i32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
 ; SI: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
+; SI: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
 ; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
 ; SI: buffer_store_dwordx2 [[RESULT]]
 define void @v_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
diff --git a/llvm/test/CodeGen/R600/sint_to_fp.ll b/llvm/test/CodeGen/R600/sint_to_fp.ll
index 06b8df4..dd541bb 100644
--- a/llvm/test/CodeGen/R600/sint_to_fp.ll
+++ b/llvm/test/CodeGen/R600/sint_to_fp.ll
@@ -42,7 +42,7 @@
 
 ; FUNC-LABEL: {{^}}sint_to_fp_i1_f32:
 ; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
-; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0.0, 1.0, [[CMP]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[CMP]]
 ; SI: buffer_store_dword [[RESULT]],
 ; SI: s_endpgm
 define void @sint_to_fp_i1_f32(float addrspace(1)* %out, i32 %in) {
diff --git a/llvm/test/CodeGen/R600/uint_to_fp.f64.ll b/llvm/test/CodeGen/R600/uint_to_fp.f64.ll
index f346833..09e987d 100644
--- a/llvm/test/CodeGen/R600/uint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/R600/uint_to_fp.f64.ll
@@ -4,9 +4,9 @@
 
 ; SI-LABEL: {{^}}v_uint_to_fp_i64_to_f64
 ; SI: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
-; SI-DAG: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
-; SI-DAG: v_cvt_f64_u32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
+; SI: v_cvt_f64_u32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
 ; SI: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
+; SI: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
 ; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
 ; SI: buffer_store_dwordx2 [[RESULT]]
 define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
diff --git a/llvm/test/CodeGen/R600/uint_to_fp.ll b/llvm/test/CodeGen/R600/uint_to_fp.ll
index adb3653..1c5e487 100644
--- a/llvm/test/CodeGen/R600/uint_to_fp.ll
+++ b/llvm/test/CodeGen/R600/uint_to_fp.ll
@@ -60,7 +60,7 @@
 
 ; FUNC-LABEL: {{^}}uint_to_fp_i1_to_f32:
 ; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
-; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0.0, 1.0, [[CMP]]
+; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[CMP]]
 ; SI: buffer_store_dword [[RESULT]],
 ; SI: s_endpgm
 define void @uint_to_fp_i1_to_f32(float addrspace(1)* %out, i32 %in) {
diff --git a/llvm/test/CodeGen/R600/xor.ll b/llvm/test/CodeGen/R600/xor.ll
index b444b4e..6ed35f2 100644
--- a/llvm/test/CodeGen/R600/xor.ll
+++ b/llvm/test/CodeGen/R600/xor.ll
@@ -39,7 +39,7 @@
 ; FUNC-LABEL: {{^}}xor_i1:
 ; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}}
 
-; SI-DAG: v_cmp_ge_f32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 0.0
+; SI-DAG: v_cmp_ge_f32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 0
 ; SI-DAG: v_cmp_ge_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 1.0
 ; SI: s_xor_b64 [[XOR:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]]
 ; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, [[XOR]]