[InstSimplify] add vector tests and a simpler version of the negative tests

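The vector tests mirror the existing scalar pattern: the condition tests
exactly the bit that the masked select arm clears, so both arms are equal
to %x and the select simplifies away. A minimal scalar sketch of the fold
these tests exercise (illustrative only; the committed tests use <2 x i32>
and the function name here is hypothetical):

  define i32 @sketch_select_icmp_and_8_ne_0_and_not_8(i32 %x) {
    ; When (x & 8) != 0 the select returns %x directly; when (x & 8) == 0,
    ; bit 3 of %x is already clear, so (x & -9) == %x. Either way: %x.
    %and = and i32 %x, 8
    %cmp = icmp ne i32 %and, 0
    %and1 = and i32 %x, -9
    %sel = select i1 %cmp, i32 %x, i32 %and1
    ret i32 %sel
  }
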
llvm-svn: 276328
diff --git a/llvm/test/Transforms/InstSimplify/select.ll b/llvm/test/Transforms/InstSimplify/select.ll
index 4500662..a1859c8 100644
--- a/llvm/test/Transforms/InstSimplify/select.ll
+++ b/llvm/test/Transforms/InstSimplify/select.ll
@@ -346,10 +346,55 @@
   ret i32 %sel
 }
 
-define i64 @select_icmp_x_and_8_eq_0_y_and_not_8(i32 %x, i64 %y) {
+; Make sure that at least a few of the same patterns are repeated with vector types.
+
+define <2 x i32> @select_icmp_and_8_ne_0_and_not_8_vec(<2 x i32> %x) {
+; CHECK-LABEL: @select_icmp_and_8_ne_0_and_not_8_vec(
+; CHECK-NEXT:    ret <2 x i32> %x
+;
+  %and = and <2 x i32> %x, <i32 8, i32 8>
+  %cmp = icmp ne <2 x i32> %and, zeroinitializer
+  %and1 = and <2 x i32> %x, <i32 -9, i32 -9>
+  %sel = select <2 x i1> %cmp, <2 x i32> %x, <2 x i32> %and1
+  ret <2 x i32> %sel
+}
+
+define <2 x i32> @select_icmp_trunc_8_ne_0_and_not_8_alt_vec(<2 x i32> %x) {
+; CHECK-LABEL: @select_icmp_trunc_8_ne_0_and_not_8_alt_vec(
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc <2 x i32> %x to <2 x i4>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i4> [[TRUNC]], <i4 -1, i4 -1>
+; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> %x, <i32 -9, i32 -9>
+; CHECK-NEXT:    [[SEL:%.*]] = select <2 x i1> [[CMP]], <2 x i32> [[AND]], <2 x i32> %x
+; CHECK-NEXT:    ret <2 x i32> [[SEL]]
+;
+  %trunc = trunc <2 x i32> %x to <2 x i4>
+  %cmp = icmp sgt <2 x i4> %trunc, <i4 -1, i4 -1>
+  %and = and <2 x i32> %x, <i32 -9, i32 -9>
+  %sel = select <2 x i1> %cmp, <2 x i32> %and, <2 x i32> %x
+  ret <2 x i32> %sel
+}
+
+; Negative tests: these would insert a bit from %x into %y, so they are not simplifications. The transform may be possible in InstCombine (which can create new instructions), but not in InstSimplify.
+
+define i32 @select_icmp_x_and_8_eq_0_y_and_not_8(i32 %x, i32 %y) {
 ; CHECK-LABEL: @select_icmp_x_and_8_eq_0_y_and_not_8(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 %x, 8
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    [[AND1:%.*]] = and i32 %y, -9
+; CHECK-NEXT:    [[Y_AND1:%.*]] = select i1 [[CMP]], i32 %y, i32 [[AND1]]
+; CHECK-NEXT:    ret i32 [[Y_AND1]]
+;
+  %and = and i32 %x, 8
+  %cmp = icmp eq i32 %and, 0
+  %and1 = and i32 %y, -9
+  %y.and1 = select i1 %cmp, i32 %y, i32 %and1
+  ret i32 %y.and1
+}
+
+define i64 @select_icmp_x_and_8_eq_0_y64_and_not_8(i32 %x, i64 %y) {
+; CHECK-LABEL: @select_icmp_x_and_8_eq_0_y64_and_not_8(
+; CHECK-NEXT:    [[AND:%.*]] = and i32 %x, 8
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
 ; CHECK-NEXT:    [[AND1:%.*]] = and i64 %y, -9
 ; CHECK-NEXT:    [[Y_AND1:%.*]] = select i1 [[CMP]], i64 %y, i64 [[AND1]]
 ; CHECK-NEXT:    ret i64 [[Y_AND1]]
@@ -361,8 +406,8 @@
   ret i64 %y.and1
 }
 
-define i64 @select_icmp_x_and_8_ne_0_y_and_not_8(i32 %x, i64 %y) {
-; CHECK-LABEL: @select_icmp_x_and_8_ne_0_y_and_not_8(
+define i64 @select_icmp_x_and_8_ne_0_y64_and_not_8(i32 %x, i64 %y) {
+; CHECK-LABEL: @select_icmp_x_and_8_ne_0_y64_and_not_8(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 %x, 8
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
 ; CHECK-NEXT:    [[AND1:%.*]] = and i64 %y, -9
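
A note on the negative test @select_icmp_x_and_8_eq_0_y_and_not_8 above: the
select computes %y with bit 3 cleared whenever bit 3 of %x is set, i.e.
y & ~(x & 8). A sketch of what the InstCombine fold hinted at in the comment
might produce (a hypothetical result with a hypothetical function name, not
the committed behavior; InstSimplify cannot do this because it would have to
create new instructions):

  define i32 @sketch_clear_bit3_of_y_if_set_in_x(i32 %x, i32 %y) {
    %m = and i32 %x, 8        ; isolate bit 3 of x
    %notm = xor i32 %m, -1    ; ~(x & 8): either -1 or -9
    %r = and i32 %y, %notm    ; clear bit 3 of y iff it is set in x
    ret i32 %r
  }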