It seems better to scalarize vectors of size 1 instead of widening them.
Also add support for widening SETCC.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@94342 91177308-0d34-0410-b5e6-96231b3b80d8
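
The legalizer change itself lives in lib/CodeGen/SelectionDAG and is not part of this excerpt; only the new regression test is shown below. As a rough illustration of the second half of the commit message, here is a minimal sketch of what widening a vector SETCC result can look like in DAGTypeLegalizer. It is a hedged sketch, not the committed patch: it assumes the usual helpers (getTypeToTransformTo, GetWidenedVector) and that both operands widen to the same element count as the result.

  // Hedged sketch, not the committed code: widen a vector SETCC by
  // widening the result type to the next legal vector type and reusing
  // the widened operands. The padding lanes compare to an undefined
  // value, which is harmless because those lanes of the result are
  // never consumed.
  SDValue DAGTypeLegalizer::WidenVecRes_SETCC(SDNode *N) {
    EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(),
                                           N->getValueType(0));
    SDValue InOp1 = GetWidenedVector(N->getOperand(0));
    SDValue InOp2 = GetWidenedVector(N->getOperand(1));
    // Emit the compare in the wider type, reusing the original
    // condition code operand (N->getOperand(2)).
    return DAG.getNode(ISD::SETCC, N->getDebugLoc(), WidenVT,
                       InOp1, InOp2, N->getOperand(2));
  }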
diff --git a/test/CodeGen/X86/vsplit-and.ll b/test/CodeGen/X86/vsplit-and.ll
new file mode 100644
index 0000000..a247c6e
--- /dev/null
+++ b/test/CodeGen/X86/vsplit-and.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -march=x86 -disable-mmx | FileCheck %s
+
+define void @t(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind {
+; CHECK: andb
+ %cmp1 = icmp ne <2 x i64> %src1, zeroinitializer
+ %cmp2 = icmp ne <2 x i64> %src2, zeroinitializer
+ %t1 = and <2 x i1> %cmp1, %cmp2
+ %t2 = sext <2 x i1> %t1 to <2 x i64>
+ store <2 x i64> %t2, <2 x i64>* %dst
+ ret void
+}
+
+define void @t2(<3 x i64>* %dst, <3 x i64> %src1, <3 x i64> %src2) nounwind {
+; CHECK: andb
+ %cmp1 = icmp ne <3 x i64> %src1, zeroinitializer
+ %cmp2 = icmp ne <3 x i64> %src2, zeroinitializer
+ %t1 = and <3 x i1> %cmp1, %cmp2
+ %t2 = sext <3 x i1> %t1 to <3 x i64>
+ store <3 x i64> %t2, <3 x i64>* %dst
+ ret void
+}
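
Both test functions compare their vector operands against zero, AND the resulting i1 masks, and sign-extend the mask back to the original element type before storing it. With MMX disabled, the <2 x i64> case presumably gets split into v1i64 halves, which this change now scalarizes to plain i64 operations instead of widening, while the <3 x i64> case exercises the new SETCC widening path. In both cases FileCheck only asserts that the mask AND survives as a scalar byte instruction (andb).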