- Start moving target-dependent nodes that could be represented by an
  instruction sequence and cannot ordinarily be simplified by DAGCombine
  into the various target description files or SPUDAGToDAGISel.cpp.

  This makes some 64-bit operations legal (see the example below).

- Eliminate target-dependent ISD enums.

- Update tests.
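
  As a rough illustration (this sketch simply mirrors one of the new
  cases added to shift_ops.ll below; the exact instruction sequence
  depends on the shift amount), an i64 shift such as:

    define i64 @shl_i64_1(i64 %arg1) {
            %A = shl i64 %arg1, 9
            ret i64 %A
    }

  now selects directly to SPU quadword shift/rotate instructions
  (rotqbyi, rotqbii, and friends, as the updated RUN lines check)
  rather than requiring custom lowering.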


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@61508 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/CellSPU/fdiv.ll b/test/CodeGen/CellSPU/fdiv.ll
index 826a2fa..d121c3f 100644
--- a/test/CodeGen/CellSPU/fdiv.ll
+++ b/test/CodeGen/CellSPU/fdiv.ll
@@ -1,9 +1,11 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
 ; RUN: grep frest    %t1.s | count 2 
 ; RUN: grep -w fi    %t1.s | count 2 
-; RUN: grep fm       %t1.s | count 4 
+; RUN: grep -w fm    %t1.s | count 2
 ; RUN: grep fma      %t1.s | count 2 
-; RUN: grep fnms     %t1.s | count 2
+; RUN: grep fnms     %t1.s | count 4
+; RUN: grep cgti     %t1.s | count 2
+; RUN: grep selb     %t1.s | count 2
 ;
 ; This file includes standard floating point arithmetic instructions
 target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
diff --git a/test/CodeGen/CellSPU/i64ops.ll b/test/CodeGen/CellSPU/i64ops.ll
index 5e7897b..51abd44 100644
--- a/test/CodeGen/CellSPU/i64ops.ll
+++ b/test/CodeGen/CellSPU/i64ops.ll
@@ -1,8 +1,5 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
-; RUN: grep {fsmbi.*61680}   %t1.s | count 1
-; RUN: grep rotqmbyi         %t1.s | count 1
-; RUN: grep rotmai           %t1.s | count 1
-; RUN: grep selb             %t1.s | count 1
+; RUN: grep xswd             %t1.s | count 1
 ; RUN: grep shufb            %t1.s | count 2
 ; RUN: grep cg               %t1.s | count 1
 ; RUN: grep addx             %t1.s | count 1
diff --git a/test/CodeGen/CellSPU/mul_ops.ll b/test/CodeGen/CellSPU/mul_ops.ll
index 843505f..085ce55 100644
--- a/test/CodeGen/CellSPU/mul_ops.ll
+++ b/test/CodeGen/CellSPU/mul_ops.ll
@@ -8,7 +8,7 @@
 ; RUN: grep and     %t1.s | count 2
 ; RUN: grep selb    %t1.s | count 6
 ; RUN: grep fsmbi   %t1.s | count 4
-; RUN: grep shli    %t1.s | count 2
+; RUN: grep shli    %t1.s | count 4
 ; RUN: grep shlhi   %t1.s | count 4
 ; RUN: grep ila     %t1.s | count 2
 ; RUN: grep xsbh    %t1.s | count 4
diff --git a/test/CodeGen/CellSPU/shift_ops.ll b/test/CodeGen/CellSPU/shift_ops.ll
index b6629ca..5b60dc1 100644
--- a/test/CodeGen/CellSPU/shift_ops.ll
+++ b/test/CodeGen/CellSPU/shift_ops.ll
@@ -1,10 +1,21 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
-; RUN: grep shlh   %t1.s | count 84
-; RUN: grep shlhi  %t1.s | count 51
-; RUN: grep shl    %t1.s | count 168
-; RUN: grep shli   %t1.s | count 51
-; RUN: grep xshw   %t1.s | count 5
-; RUN: grep and    %t1.s | count 5
+; RUN: grep -w shlh      %t1.s | count 9
+; RUN: grep -w shlhi     %t1.s | count 3
+; RUN: grep -w shl       %t1.s | count 9
+; RUN: grep -w shli      %t1.s | count 3
+; RUN: grep -w xshw      %t1.s | count 5
+; RUN: grep -w and       %t1.s | count 5
+; RUN: grep -w andi      %t1.s | count 2
+; RUN: grep -w rotmi     %t1.s | count 2
+; RUN: grep -w rotqmbyi  %t1.s | count 1
+; RUN: grep -w rotqmbii  %t1.s | count 2
+; RUN: grep -w rotqmby   %t1.s | count 1
+; RUN: grep -w rotqmbi   %t1.s | count 1
+; RUN: grep -w rotqbyi   %t1.s | count 1
+; RUN: grep -w rotqbii   %t1.s | count 2
+; RUN: grep -w rotqbybi  %t1.s | count 1
+; RUN: grep -w sfi       %t1.s | count 3
+
 target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
 target triple = "spu"
 
@@ -210,3 +221,57 @@
         %A = shl i32 0, %arg1
         ret i32 %A
 }
+
+;; i64 shift left
+
+define i64 @shl_i64_1(i64 %arg1) {
+	%A = shl i64 %arg1, 9
+	ret i64 %A
+}
+
+define i64 @shl_i64_2(i64 %arg1) {
+	%A = shl i64 %arg1, 3
+	ret i64 %A
+}
+
+define i64 @shl_i64_3(i64 %arg1, i32 %shift) {
+	%1 = zext i32 %shift to i64
+	%2 = shl i64 %arg1, %1
+	ret i64 %2
+}
+
+;; i64 shift right logical (vacated high bits are filled with 0s)
+
+define i64 @lshr_i64_1(i64 %arg1) {
+	%1 = lshr i64 %arg1, 9
+	ret i64 %1
+}
+
+define i64 @lshr_i64_2(i64 %arg1) {
+	%1 = lshr i64 %arg1, 3
+	ret i64 %1
+}
+
+define i64 @lshr_i64_3(i64 %arg1, i32 %shift) {
+	%1 = zext i32 %shift to i64
+	%2 = lshr i64 %arg1, %1
+	ret i64 %2
+}
+
+;; i64 shift right arithmetic (vacated high bits are filled with the sign bit)
+
+define i64 @ashr_i64_1(i64 %arg) {
+	%1 = ashr i64 %arg, 9
+	ret i64 %1
+}
+
+define i64 @ashr_i64_2(i64 %arg) {
+	%1 = ashr i64 %arg, 3
+	ret i64 %1
+}
+
+define i64 @ashr_i64_3(i64 %arg1, i32 %shift) {
+	%1 = zext i32 %shift to i64
+	%2 = ashr i64 %arg1, %1
+	ret i64 %2
+}
diff --git a/test/CodeGen/CellSPU/useful-harnesses/i64operations.c b/test/CodeGen/CellSPU/useful-harnesses/i64operations.c
index 7b86070..3819797 100644
--- a/test/CodeGen/CellSPU/useful-harnesses/i64operations.c
+++ b/test/CodeGen/CellSPU/useful-harnesses/i64operations.c
@@ -34,19 +34,45 @@
   { "neq", i64_neq, i64_neq_select }
 };
 
+uint64_t i64_shl_const(uint64_t a) {
+  return a << 10;
+}
+
+uint64_t i64_shl(uint64_t a, int amt) {
+  return a << amt;
+}
+
+uint64_t i64_srl_const(uint64_t a) {
+  return a >> 10;
+}
+
+uint64_t i64_srl(uint64_t a, int amt) {
+  return a >> amt;
+}
+
+int64_t i64_sra_const(int64_t a) {
+  return a >> 10;
+}
+
+int64_t i64_sra(int64_t a, int amt) {
+  return a >> amt;
+}
+
 int main(void) {
   int i;
-  int64_t a = 1234567890000LL;
-  int64_t b = 2345678901234LL;
-  int64_t c = 1234567890001LL;
-  int64_t d =         10001LL;
-  int64_t e =         10000LL;
+  int64_t a =  1234567890003LL;
+  int64_t b =  2345678901235LL;
+  int64_t c =  1234567890001LL;
+  int64_t d =          10001LL;
+  int64_t e =          10000LL;
+  int64_t f = -1068103409991LL;
 
   printf("a = %16lld (0x%016llx)\n", a, a);
   printf("b = %16lld (0x%016llx)\n", b, b);
   printf("c = %16lld (0x%016llx)\n", c, c);
   printf("d = %16lld (0x%016llx)\n", d, d);
   printf("e = %16lld (0x%016llx)\n", e, e);
+  printf("f = %16lld (0x%016llx)\n", f, f);
   printf("----------------------------------------\n");
 
   for (i = 0; i < sizeof(preds)/sizeof(preds[0]); ++i) {
@@ -64,5 +90,23 @@
     printf("----------------------------------------\n");
   }
 
+  printf("a                = 0x%016llx\n", a);
+  printf("i64_shl_const(a) = 0x%016llx\n", i64_shl_const(a));
+  printf("i64_shl(a)       = 0x%016llx\n", i64_shl(a, 5));
+  printf("i64_srl_const(a) = 0x%016llx\n", i64_srl_const(a));
+  printf("i64_srl(a)       = 0x%016llx\n", i64_srl(a, 5));
+  printf("i64_sra_const(a) = 0x%016llx\n", i64_sra_const(a));
+  printf("i64_sra(a)       = 0x%016llx\n", i64_sra(a, 5));
+  printf("----------------------------------------\n");
+
+  printf("f                = 0x%016llx\n", f);
+  printf("i64_shl_const(f) = 0x%016llx\n", i64_shl_const(f));
+  printf("i64_shl(f)       = 0x%016llx\n", i64_shl(f, 10));
+  printf("i64_srl_const(f) = 0x%016llx\n", i64_srl_const(f));
+  printf("i64_srl(f)       = 0x%016llx\n", i64_srl(f, 10));
+  printf("i64_sra_const(f) = 0x%016llx\n", i64_sra_const(f));
+  printf("i64_sra(f)       = 0x%016llx\n", i64_sra(f, 10));
+  printf("----------------------------------------\n");
+
   return 0;
 }