R600/SI: Do abs/neg folding with ComplexPatterns

Abs/neg folding has moved out of foldOperands and into the instruction
selection phase, where it is done with complex patterns.  As a
consequence of this change, we now prefer to select the 64-bit encoding
for most instructions, and the modifier operands have been dropped from
integer VOP3 instructions.

llvm-svn: 214467
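
For context, the ComplexPattern approach roughly amounts to a selector
callback that peels FNEG/FABS off a VOP3 source operand and returns the
bare value together with a source-modifier immediate.  The sketch below
is illustrative only; the function name, the modifier bit values, and
the exact SelectionDAG calls are assumptions, not the code added by
this commit.

    // Illustrative sketch: fold abs/neg into source-modifier bits during
    // instruction selection instead of later in foldOperands.
    bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                            SDValue &SrcMods) const {
      unsigned Mods = 0;
      Src = In;

      // fneg(x) -> x with the NEG modifier bit set.
      if (Src.getOpcode() == ISD::FNEG) {
        Mods |= 1; // assumed NEG bit
        Src = Src.getOperand(0);
      }

      // fabs(x) -> x with the ABS modifier bit set.
      if (Src.getOpcode() == ISD::FABS) {
        Mods |= 2; // assumed ABS bit
        Src = Src.getOperand(0);
      }

      // Return the accumulated modifiers as an immediate operand.
      SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32);
      return true;
    }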
diff --git a/llvm/test/CodeGen/R600/fabs.ll b/llvm/test/CodeGen/R600/fabs.ll
index b87ce22..fa1b608 100644
--- a/llvm/test/CodeGen/R600/fabs.ll
+++ b/llvm/test/CodeGen/R600/fabs.ll
@@ -50,8 +50,9 @@
 }
 
 ; SI-CHECK-LABEL: @fabs_fold
+; SI-CHECK: S_LOAD_DWORD [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
 ; SI-CHECK-NOT: V_AND_B32_e32
-; SI-CHECK: V_MUL_F32_e64 v{{[0-9]+}}, s{{[0-9]+}}, |v{{[0-9]+}}|
+; SI-CHECK: V_MUL_F32_e64 v{{[0-9]+}}, |[[ABS_VALUE]]|, v{{[0-9]+}}
 define void @fabs_fold(float addrspace(1)* %out, float %in0, float %in1) {
 entry:
   %0 = call float @fabs(float %in0)
diff --git a/llvm/test/CodeGen/R600/fneg.ll b/llvm/test/CodeGen/R600/fneg.ll
index 4cddc73..5b47817 100644
--- a/llvm/test/CodeGen/R600/fneg.ll
+++ b/llvm/test/CodeGen/R600/fneg.ll
@@ -61,8 +61,9 @@
 }
 
 ; SI-CHECK-LABEL: @fneg_fold
+; SI-CHECK: S_LOAD_DWORD [[NEG_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb
 ; SI-CHECK-NOT: V_XOR_B32
-; SI-CHECK: V_MUL_F32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
+; SI-CHECK: V_MUL_F32_e64 v{{[0-9]+}}, -[[NEG_VALUE]], v{{[0-9]+}}
 define void @fneg_fold(float addrspace(1)* %out, float %in) {
 entry:
   %0 = fsub float -0.0, %in
diff --git a/llvm/test/CodeGen/R600/fsub.ll b/llvm/test/CodeGen/R600/fsub.ll
index 4f74efb..5fb9ff6 100644
--- a/llvm/test/CodeGen/R600/fsub.ll
+++ b/llvm/test/CodeGen/R600/fsub.ll
@@ -20,8 +20,8 @@
 ; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
 ; R600-CHECK-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
 ; SI-CHECK: @fsub_v2f32
-; SI-CHECK: V_SUB_F32
-; SI-CHECK: V_SUB_F32
+; SI-CHECK: V_SUBREV_F32
+; SI-CHECK: V_SUBREV_F32
 define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fsub <2 x float> %a, %b
@@ -35,10 +35,10 @@
 ; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
 ; R600-CHECK: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
 ; SI-CHECK: @fsub_v4f32
-; SI-CHECK: V_SUB_F32
-; SI-CHECK: V_SUB_F32
-; SI-CHECK: V_SUB_F32
-; SI-CHECK: V_SUB_F32
+; SI-CHECK: V_SUBREV_F32
+; SI-CHECK: V_SUBREV_F32
+; SI-CHECK: V_SUBREV_F32
+; SI-CHECK: V_SUBREV_F32
 define void @fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
   %a = load <4 x float> addrspace(1) * %in
diff --git a/llvm/test/CodeGen/R600/mul_uint24.ll b/llvm/test/CodeGen/R600/mul_uint24.ll
index 419f275..72bbe0f 100644
--- a/llvm/test/CodeGen/R600/mul_uint24.ll
+++ b/llvm/test/CodeGen/R600/mul_uint24.ll
@@ -23,7 +23,7 @@
 ; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
 ; EG: 16
 ; SI: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 16,
+; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 16
 define void @i16_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
 entry:
   %0 = mul i16 %a, %b
@@ -37,7 +37,7 @@
 ; The result must be sign-extended
 ; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
 ; SI: V_MUL_U32_U24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 8,
+; SI: V_BFE_I32 v{{[0-9]}}, [[MUL]], 0, 8
 
 define void @i8_mul24(i32 addrspace(1)* %out, i8 %a, i8 %b) {
 entry:
diff --git a/llvm/test/CodeGen/R600/vop-shrink.ll b/llvm/test/CodeGen/R600/vop-shrink.ll
index f8bc2b4..bf1aae4 100644
--- a/llvm/test/CodeGen/R600/vop-shrink.ll
+++ b/llvm/test/CodeGen/R600/vop-shrink.ll
@@ -1,9 +1,4 @@
 ; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; XXX: This testis for a bug in the SIShrinkInstruction pass and it will be
-;       relevant once we are selecting 64-bit instructions.  We are
-;       currently selecting mostly 32-bit instruction, so the
-;       SIShrinkInstructions pass isn't doing much.
-; XFAIL: *
 
 ; Test that we correctly commute a sub instruction
 ; FUNC-LABEL: @sub_rev