- Start moving target-dependent nodes that can be represented by an
  instruction sequence, and that DAGCombine cannot ordinarily simplify,
  into the various target description (.td) files or SPUDAGToDAGISel.cpp
  (see the pattern sketch after this list).

  This makes some 64-bit operations legal, notably the i64 shl, lshr and
  ashr forms exercised by the updated test below.

- Eliminate target-dependent ISD enums.

- Update tests.
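
  As a rough sketch of the first point: a TableGen pattern can expand a
  single DAG node into a short machine-instruction sequence, so the node
  no longer needs a target-dependent SPUISD wrapper or custom C++
  lowering. The names below (uimm7, SHLQBYIr64, SHLQBIIr64, BYTES_OF,
  BITS_OF) are placeholders for illustration, not the actual CellSPU
  definitions:

    // Illustrative only: select a constant i64 shift-left as a
    // byte-granular shift followed by a bit-granular shift covering the
    // remaining 0-7 bits, so no custom lowering code is required.
    def : Pat<(shl R64C:$rA, uimm7:$amt),
              (SHLQBIIr64 (SHLQBYIr64 R64C:$rA, (BYTES_OF uimm7:$amt)),
                          (BITS_OF uimm7:$amt))>;

  Variable-amount shifts are handled the same way, being matched to
  sequences of the rotq*-family instructions counted in the new RUN lines
  rather than going through a custom-lowered node.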


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@61508 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/CellSPU/shift_ops.ll b/test/CodeGen/CellSPU/shift_ops.ll
index b6629ca..5b60dc1 100644
--- a/test/CodeGen/CellSPU/shift_ops.ll
+++ b/test/CodeGen/CellSPU/shift_ops.ll
@@ -1,10 +1,21 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
-; RUN: grep shlh   %t1.s | count 84
-; RUN: grep shlhi  %t1.s | count 51
-; RUN: grep shl    %t1.s | count 168
-; RUN: grep shli   %t1.s | count 51
-; RUN: grep xshw   %t1.s | count 5
-; RUN: grep and    %t1.s | count 5
+; RUN: grep -w shlh      %t1.s | count 9
+; RUN: grep -w shlhi     %t1.s | count 3
+; RUN: grep -w shl       %t1.s | count 9
+; RUN: grep -w shli      %t1.s | count 3
+; RUN: grep -w xshw      %t1.s | count 5
+; RUN: grep -w and       %t1.s | count 5
+; RUN: grep -w andi      %t1.s | count 2
+; RUN: grep -w rotmi     %t1.s | count 2
+; RUN: grep -w rotqmbyi  %t1.s | count 1
+; RUN: grep -w rotqmbii  %t1.s | count 2
+; RUN: grep -w rotqmby   %t1.s | count 1
+; RUN: grep -w rotqmbi   %t1.s | count 1
+; RUN: grep -w rotqbyi   %t1.s | count 1
+; RUN: grep -w rotqbii   %t1.s | count 2
+; RUN: grep -w rotqbybi  %t1.s | count 1
+; RUN: grep -w sfi       %t1.s | count 3
+
 target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
 target triple = "spu"
 
@@ -210,3 +221,57 @@
         %A = shl i32 0, %arg1
         ret i32 %A
 }
+
+;; i64 shift left
+
+define i64 @shl_i64_1(i64 %arg1) {
+	%A = shl i64 %arg1, 9
+	ret i64 %A
+}
+
+define i64 @shl_i64_2(i64 %arg1) {
+	%A = shl i64 %arg1, 3
+	ret i64 %A
+}
+
+define i64 @shl_i64_3(i64 %arg1, i32 %shift) {
+	%1 = zext i32 %shift to i64
+	%2 = shl i64 %arg1, %1
+	ret i64 %2
+}
+
+;; i64 shift right logical (shift 0s from the right)
+
+define i64 @lshr_i64_1(i64 %arg1) {
+	%1 = lshr i64 %arg1, 9
+	ret i64 %1
+}
+
+define i64 @lshr_i64_2(i64 %arg1) {
+	%1 = lshr i64 %arg1, 3
+	ret i64 %1
+}
+
+define i64 @lshr_i64_3(i64 %arg1, i32 %shift) {
+	%1 = zext i32 %shift to i64
+	%2 = lshr i64 %arg1, %1
+	ret i64 %2
+}
+
+;; i64 shift right arithmetic (shift 1s from the right)
+
+define i64 @ashr_i64_1(i64 %arg) {
+	%1 = ashr i64 %arg, 9
+	ret i64 %1
+}
+
+define i64 @ashr_i64_2(i64 %arg) {
+	%1 = ashr i64 %arg, 3
+	ret i64 %1
+}
+
+define i64 @ashr_i64_3(i64 %arg1, i32 %shift) {
+	%1 = zext i32 %shift to i64
+	%2 = ashr i64 %arg1, %1
+	ret i64 %2
+}