CellSPU:
- Fix v2i64/v2f64 vector insertion code before IBM files a bug report.
- Ensure that zero (0) offsets relative to $sp don't trip an assert:
  "add $sp, 0" is legalized to $sp alone, which previously fired the
  assert.
- Shuffle masks passed to SPUISD::SHUFB are now v16i8 or v4i32 (see the
  sketch below).
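
For illustration only (a minimal sketch, not one of the committed tests),
IR of this shape is what gets lowered through SPUISD::SHUFB on CellSPU;
the byte-permute mask constructed for it during lowering now carries a
consistent v16i8 or v4i32 type:

  ; Sketch: interleave the low elements of %a and %b. The @shufb_sketch
  ; name is hypothetical; any two-input shuffle exercises this path.
  define <4 x i32> @shufb_sketch(<4 x i32> %a, <4 x i32> %b) nounwind {
  entry:
    %r = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
    ret <4 x i32> %r
  }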


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60358 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/CellSPU/vecinsert.ll b/test/CodeGen/CellSPU/vecinsert.ll
index 9864c53..726fe3f 100644
--- a/test/CodeGen/CellSPU/vecinsert.ll
+++ b/test/CodeGen/CellSPU/vecinsert.ll
@@ -1,12 +1,12 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
-; RUN: grep cbd     %t1.s | count 3 
-; RUN: grep chd     %t1.s | count 3
-; RUN: grep cwd     %t1.s | count 6
-; RUN: grep il      %t1.s | count 4
-; RUN: grep ilh     %t1.s | count 3
+; RUN: grep cbd     %t1.s | count 5
+; RUN: grep chd     %t1.s | count 5
+; RUN: grep cwd     %t1.s | count 10
+; RUN: grep il      %t1.s | count 15
+; RUN: grep ilh     %t1.s | count 10
 ; RUN: grep iohl    %t1.s | count 1
-; RUN: grep ilhu    %t1.s | count 1
-; RUN: grep shufb   %t1.s | count 12
+; RUN: grep ilhu    %t1.s | count 4
+; RUN: grep shufb   %t1.s | count 26
 ; RUN: grep 17219   %t1.s | count 1 
 ; RUN: grep 22598   %t1.s | count 1
 ; RUN: grep -- -39  %t1.s | count 1
@@ -51,3 +51,70 @@
         %tmp1.2 = insertelement <4 x i32> %tmp1.1, i32 %x, i32 3
         ret <4 x i32> %tmp1.2
 }
+
+define void @variable_v16i8_1(<16 x i8>* %a, i32 %i) nounwind {
+entry:
+	%arrayidx = getelementptr <16 x i8>* %a, i32 %i
+	%tmp2 = load <16 x i8>* %arrayidx
+	%tmp3 = insertelement <16 x i8> %tmp2, i8 1, i32 1
+	%tmp8 = insertelement <16 x i8> %tmp3, i8 2, i32 11
+	store <16 x i8> %tmp8, <16 x i8>* %arrayidx
+	ret void
+}
+
+define void @variable_v8i16_1(<8 x i16>* %a, i32 %i) nounwind {
+entry:
+	%arrayidx = getelementptr <8 x i16>* %a, i32 %i
+	%tmp2 = load <8 x i16>* %arrayidx
+	%tmp3 = insertelement <8 x i16> %tmp2, i16 1, i32 1
+	%tmp8 = insertelement <8 x i16> %tmp3, i16 2, i32 6
+	store <8 x i16> %tmp8, <8 x i16>* %arrayidx
+	ret void
+}
+
+define void @variable_v4i32_1(<4 x i32>* %a, i32 %i) nounwind {
+entry:
+	%arrayidx = getelementptr <4 x i32>* %a, i32 %i
+	%tmp2 = load <4 x i32>* %arrayidx
+	%tmp3 = insertelement <4 x i32> %tmp2, i32 1, i32 1
+	%tmp8 = insertelement <4 x i32> %tmp3, i32 2, i32 2
+	store <4 x i32> %tmp8, <4 x i32>* %arrayidx
+	ret void
+}
+
+define void @variable_v4f32_1(<4 x float>* %a, i32 %i) nounwind {
+entry:
+	%arrayidx = getelementptr <4 x float>* %a, i32 %i
+	%tmp2 = load <4 x float>* %arrayidx
+	%tmp3 = insertelement <4 x float> %tmp2, float 1.000000e+00, i32 1
+	%tmp8 = insertelement <4 x float> %tmp3, float 2.000000e+00, i32 2
+	store <4 x float> %tmp8, <4 x float>* %arrayidx
+	ret void
+}
+
+define void @variable_v2i64_1(<2 x i64>* %a, i32 %i) nounwind {
+entry:
+	%arrayidx = getelementptr <2 x i64>* %a, i32 %i
+	%tmp2 = load <2 x i64>* %arrayidx
+	%tmp3 = insertelement <2 x i64> %tmp2, i64 615, i32 0
+	store <2 x i64> %tmp3, <2 x i64>* %arrayidx
+	ret void
+}
+
+define void @variable_v2i64_2(<2 x i64>* %a, i32 %i) nounwind {
+entry:
+	%arrayidx = getelementptr <2 x i64>* %a, i32 %i
+	%tmp2 = load <2 x i64>* %arrayidx
+	%tmp3 = insertelement <2 x i64> %tmp2, i64 615, i32 1
+	store <2 x i64> %tmp3, <2 x i64>* %arrayidx
+	ret void
+}
+
+define void @variable_v2f64_1(<2 x double>* %a, i32 %i) nounwind {
+entry:
+	%arrayidx = getelementptr <2 x double>* %a, i32 %i
+	%tmp2 = load <2 x double>* %arrayidx
+	%tmp3 = insertelement <2 x double> %tmp2, double 1.000000e+00, i32 1
+	store <2 x double> %tmp3, <2 x double>* %arrayidx
+	ret void
+}