CellSPU:
(a) Rework the i64 zero/sign/any-extend code: use a shuffle to
    zero-extend i32 to i64 directly, but use rotates and shifts for
    sign extension (see the C sketch below). Also ensure consistent
    use of the unified register file.
(b) Add a new test harness for i64 operations: i64ops.ll
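
As a reference for the lowering in (a), here is a minimal C model of the
two extensions. This is an illustrative sketch of the semantics only, not
the backend code: on SPU, the zero-extend case maps to a single shufb
whose control mask injects 0x00 filler bytes for the high word, while the
sign-extend case replicates bit 31 into the high word, which is what
rotmai (arithmetic word shift) plus the quadword rotates provide.

    #include <stdint.h>

    /* Illustrative model only, not the SPU lowering itself. */
    uint64_t zext_i64_i32_model(uint32_t a) {
        /* High word is all zeros; a single byte shuffle that selects
         * zero filler bytes produces this in one instruction. */
        return (uint64_t)a;
    }

    int64_t sext_i64_i32_model(int32_t a) {
        /* High word is 32 copies of the sign bit; in hardware, rotmai
         * performs this arithmetic shift per 32-bit element. */
        uint32_t hi = (a < 0) ? 0xFFFFFFFFu : 0u;
        return (int64_t)(((uint64_t)hi << 32) | (uint32_t)a);
    }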


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@59970 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/CellSPU/i64ops.ll b/test/CodeGen/CellSPU/i64ops.ll
new file mode 100644
index 0000000..5e7897b
--- /dev/null
+++ b/test/CodeGen/CellSPU/i64ops.ll
@@ -0,0 +1,27 @@
+; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
+; RUN: grep {fsmbi.*61680}   %t1.s | count 1
+; RUN: grep rotqmbyi         %t1.s | count 1
+; RUN: grep rotmai           %t1.s | count 1
+; RUN: grep selb             %t1.s | count 1
+; RUN: grep shufb            %t1.s | count 2
+; RUN: grep cg               %t1.s | count 1
+; RUN: grep addx             %t1.s | count 1
+
+; ModuleID = 'i64ops.bc'
+target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
+target triple = "spu"
+
+define i64 @sext_i64_i32(i32 %a) nounwind {
+  %1 = sext i32 %a to i64
+  ret i64 %1
+}
+
+define i64 @zext_i64_i32(i32 %a) nounwind {
+  %1 = zext i32 %a to i64
+  ret i64 %1
+}
+
+define i64 @add_i64(i64 %a, i64 %b) nounwind {
+  %1 = add i64 %a, %b
+  ret i64 %1
+}
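
The add_i64 function above exercises the 64-bit carry chain: SPU has no
64-bit adder, so an i64 add is split into 32-bit halves using cg (carry
generate) and addx (add extended), typically with a shufb to align the
carry word, which is what the cg/addx/shufb grep lines count. A minimal
C sketch of that dataflow (illustrative only, not the emitted assembly):

    #include <stdint.h>

    uint64_t add_i64_model(uint64_t a, uint64_t b) {
        uint32_t alo = (uint32_t)a, ahi = (uint32_t)(a >> 32);
        uint32_t blo = (uint32_t)b, bhi = (uint32_t)(b >> 32);

        uint32_t lo    = alo + blo;          /* 32-bit low-word add  */
        uint32_t carry = (lo < alo);         /* what cg computes     */
        uint32_t hi    = ahi + bhi + carry;  /* what addx computes   */

        return ((uint64_t)hi << 32) | lo;
    }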