- Remove Tilmann's custom truncate lowering: it completely hosed DAGCombine's
  ability to eliminate truncates that were not actually needed. Consequently,
  the CellSPU backend would produce correct, but _really slow and horrible_,
  code.

  Replaced it with instruction sequences in SPUInstrInfo.td that perform the
  equivalent truncation. (An illustrative example of the kind of truncate
  DAGCombine can now fold away is sketched below.)
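
  A minimal, purely illustrative IR sketch (not part of this patch; the
  function name is made up) of a redundant truncate that DAGCombine is now
  free to fold away instead of going through the old custom lowering:

      define i32 @zext_of_trunc(i32 %x) nounwind readnone {
      entry:
              %0 = trunc i32 %x to i16
              ; expected to combine into an 'and i32 %x, 65535', no shufb needed
              %1 = zext i16 %0 to i32
              ret i32 %1
      }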

- Re-examine how unaligned loads and stores work. Generated unaligned-load
  code has been tested on the CellSPU hardware; see i32operations.c and
  i64operations.c in CodeGen/CellSPU/useful-harnesses. (While these are only
  toy test programs, they do show that some real-world code compiles
  correctly.) The kind of unaligned access they exercise is sketched below.
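
  A hypothetical example (in era-appropriate IR syntax; the function name is
  made up) of the kind of unaligned load exercised by those harnesses:

      define i32 @load_unaligned_i32(i8* %p) nounwind {
      entry:
              %0 = bitcast i8* %p to i32*
              ; 1-byte alignment forces the unaligned-load lowering path
              %1 = load i32* %0, align 1
              ret i32 %1
      }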

- Fix truncating stores in bug 3193 (note: unpack_df.ll will still crash llc
  because i64 ult is not yet implemented). The truncating-store pattern in
  question is sketched below.
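
  An illustrative IR snippet (names invented for this note) showing the
  truncating-store shape that the fix covers:

      define void @store_trunc_i64_i16(i64 %v, i16* %p) nounwind {
      entry:
              %0 = trunc i64 %v to i16
              ; selected as a truncating store of the low 16 bits of %v
              store i16 %0, i16* %p
              ret void
      }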

- Added i64 equality and inequality (eq/ne) for setcc and select/setcc;
  started a new instruction information file for them, SPU64InstrInfo.td.
  Additional i64 operations should be added to that file rather than to
  SPUInstrInfo.td. (An IR pattern of the sort these entries handle is
  sketched below.)
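
  An illustrative example (not taken from this patch) of the select/setcc
  pattern the new SPU64InstrInfo.td entries are meant to handle:

      define i64 @select_i64_eq(i64 %a, i64 %b, i64 %x, i64 %y) nounwind readnone {
      entry:
              %0 = icmp eq i64 %a, %b
              %1 = select i1 %0, i64 %x, i64 %y
              ret i64 %1
      }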


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@61447 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/CellSPU/trunc.ll b/test/CodeGen/CellSPU/trunc.ll
index 845feed..1c6e1f6 100644
--- a/test/CodeGen/CellSPU/trunc.ll
+++ b/test/CodeGen/CellSPU/trunc.ll
@@ -1,16 +1,12 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
-; RUN: grep shufb   %t1.s | count 9
+; RUN: grep shufb   %t1.s | count 10
 ; RUN: grep {ilhu.*1799}  %t1.s | count 1
-; RUN: grep {ilhu.*771}  %t1.s | count 3
+; RUN: grep {ilhu.*771}  %t1.s | count 1
 ; RUN: grep {ilhu.*1543}  %t1.s | count 1
 ; RUN: grep {ilhu.*1029}  %t1.s | count 1
-; RUN: grep {ilhu.*515}  %t1.s | count 1
-; RUN: grep {iohl.*1799}  %t1.s | count 1
-; RUN: grep {iohl.*771}  %t1.s | count 3
-; RUN: grep {iohl.*1543}  %t1.s | count 2
-; RUN: grep {iohl.*515}  %t1.s | count 1
-; RUN: grep xsbh  %t1.s | count 6
-; RUN: grep sfh  %t1.s | count 5
+; RUN: grep {ilhu.*515}  %t1.s | count 2
+; RUN: grep xsbh  %t1.s | count 2
+; RUN: grep sfh  %t1.s | count 1
 
 ; ModuleID = 'trunc.bc'
 target datalayout = "E-p:32:32:128-i1:8:128-i8:8:128-i16:16:128-i32:32:128-i64:32:128-f32:32:128-f64:64:128-v64:64:64-v128:128:128-a0:0:128-s0:128:128"
@@ -41,23 +37,22 @@
 ;	ret i64 %0
 ;}
 
-define i8 @trunc_i64_i8(i64 %u, i8 %v) nounwind readnone {
+define <16 x i8> @trunc_i64_i8(i64 %u, <16 x i8> %v) nounwind readnone {
 entry:
 	%0 = trunc i64 %u to i8
-	%1 = sub i8 %0, %v
-	ret i8 %1
+        %tmp1 = insertelement <16 x i8> %v, i8 %0, i32 10
+	ret <16 x i8> %tmp1
 }
-define i16 @trunc_i64_i16(i64 %u, i16 %v) nounwind readnone {
+define <8 x i16> @trunc_i64_i16(i64 %u, <8 x i16> %v) nounwind readnone {
 entry:
 	%0 = trunc i64 %u to i16
-        %1 = sub i16 %0, %v
-	ret i16 %1
+        %tmp1 = insertelement <8 x i16> %v, i16 %0, i32 6
+	ret <8 x i16> %tmp1
 }
 define i32 @trunc_i64_i32(i64 %u, i32 %v) nounwind readnone {
 entry:
 	%0 = trunc i64 %u to i32
-	%1 = sub i32 %0, %v
-	ret i32 %1
+	ret i32 %0
 }
 
 define i8 @trunc_i32_i8(i32 %u, i8 %v) nounwind readnone {
@@ -66,16 +61,16 @@
 	%1 = sub i8 %0, %v
 	ret i8 %1
 }
-define i16 @trunc_i32_i16(i32 %u, i16 %v) nounwind readnone {
+define <8 x i16> @trunc_i32_i16(i32 %u, <8 x i16> %v) nounwind readnone {
 entry:
 	%0 = trunc i32 %u to i16
-	%1 = sub i16 %0, %v
-	ret i16 %1
+        %tmp1 = insertelement <8 x i16> %v, i16 %0, i32 3
+	ret <8 x i16> %tmp1
 }
 
-define i8 @trunc_i16_i8(i16 %u, i8 %v) nounwind readnone {
+define <16 x i8> @trunc_i16_i8(i16 %u, <16 x i8> %v) nounwind readnone {
 entry:
 	%0 = trunc i16 %u to i8
-	%1 = sub i8 %0, %v
-	ret i8 %1
+        %tmp1 = insertelement <16 x i8> %v, i8 %0, i32 5
+	ret <16 x i8> %tmp1
 }