Expand V_SET0 to xorps by default.

The xorps instruction is smaller than pxor, so prefer that encoding.

The ExecutionDepsFix pass will switch the encoding to pxor and xorpd
when appropriate.
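
For example, the raw (non-VEX) encodings differ by one byte:

  xorps %xmm0, %xmm0      # 0f 57 c0      (3 bytes, no prefix)
  pxor  %xmm0, %xmm0      # 66 0f ef c0   (4 bytes, 0x66 prefix)

A zero with no integer users now comes out as xorps, e.g. (illustrative,
mirroring the updated sse2.ll test below; the register-indirect store is
just for the sketch):

  xorps  %xmm0, %xmm0
  movaps %xmm0, (%eax)

while a zero feeding integer-domain instructions still ends up as pxor once
ExecutionDepsFix runs (see the unchanged pxor check in vec_zero.ll).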

llvm-svn: 143996
diff --git a/llvm/test/CodeGen/X86/avx-basic.ll b/llvm/test/CodeGen/X86/avx-basic.ll
index 0a46b082..edbdc06 100644
--- a/llvm/test/CodeGen/X86/avx-basic.ll
+++ b/llvm/test/CodeGen/X86/avx-basic.ll
@@ -6,7 +6,7 @@
 
 define void @zero128() nounwind ssp {
 entry:
-  ; CHECK: vpxor
+  ; CHECK: vxorps
   ; CHECK: vmovaps
   store <4 x float> zeroinitializer, <4 x float>* @z, align 16
   ret void
diff --git a/llvm/test/CodeGen/X86/sse2-blend.ll b/llvm/test/CodeGen/X86/sse2-blend.ll
index 0007cab..4ff1d03 100644
--- a/llvm/test/CodeGen/X86/sse2-blend.ll
+++ b/llvm/test/CodeGen/X86/sse2-blend.ll
@@ -26,8 +26,10 @@
   ret void
 }
 
+; FIXME: The -mattr=+sse2,-sse41 flags disable the ExecutionDepsFix pass,
+; causing the mixed domains here.
 ; CHECK: vsel_i64
-; CHECK: pxor
+; CHECK: xorps
 ; CHECK: pand
 ; CHECK: andnps
 ; CHECK: orps
@@ -41,8 +43,10 @@
   ret void
 }
 
+; FIXME: The -mattr=+sse2,-sse41 flags disable the ExecutionDepsFix pass,
+; causing the mixed domains here.
 ; CHECK: vsel_double
-; CHECK: pxor
+; CHECK: xorps
 ; CHECK: pand
 ; CHECK: andnps
 ; CHECK: orps
diff --git a/llvm/test/CodeGen/X86/sse2.ll b/llvm/test/CodeGen/X86/sse2.ll
index 70e0a8a..d520d5c 100644
--- a/llvm/test/CodeGen/X86/sse2.ll
+++ b/llvm/test/CodeGen/X86/sse2.ll
@@ -98,7 +98,7 @@
         ret void
         
 ; CHECK: test7:
-; CHECK:	pxor	%xmm0, %xmm0
+; CHECK:	xorps	%xmm0, %xmm0
 ; CHECK:	movaps	%xmm0, 0
 }
 
diff --git a/llvm/test/CodeGen/X86/vec_return.ll b/llvm/test/CodeGen/X86/vec_return.ll
index 676be9b..d5fc11e 100644
--- a/llvm/test/CodeGen/X86/vec_return.ll
+++ b/llvm/test/CodeGen/X86/vec_return.ll
@@ -1,12 +1,17 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 > %t
-; RUN: grep pxor %t | count 1
-; RUN: grep movaps %t | count 1
-; RUN: not grep shuf %t
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
 
+; Without any typed operations, always use the smaller xorps.
+; CHECK: test
+; CHECK: xorps
 define <2 x double> @test() {
 	ret <2 x double> zeroinitializer
 }
 
+; Prefer a constant pool load here.
+; CHECK: test2
+; CHECK-NOT: shuf
+; CHECK: movaps LCP
+; CHECK-NEXT: ret
 define <4 x i32> @test2() nounwind  {
 	ret <4 x i32> < i32 0, i32 0, i32 1, i32 0 >
 }
diff --git a/llvm/test/CodeGen/X86/vec_zero.ll b/llvm/test/CodeGen/X86/vec_zero.ll
index 4d1f056..682a0df 100644
--- a/llvm/test/CodeGen/X86/vec_zero.ll
+++ b/llvm/test/CodeGen/X86/vec_zero.ll
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
 
+; CHECK: foo
 ; CHECK: xorps
 define void @foo(<4 x float>* %P) {
         %T = load <4 x float>* %P               ; <<4 x float>> [#uses=1]
@@ -8,6 +9,7 @@
         ret void
 }
 
+; CHECK: bar
 ; CHECK: pxor
 define void @bar(<4 x i32>* %P) {
         %T = load <4 x i32>* %P         ; <<4 x i32>> [#uses=1]
@@ -16,3 +18,13 @@
         ret void
 }
 
+; Without any type hints from operations, we fall back to the smaller xorps.
+; The IR type <4 x i32> is ignored.
+; CHECK: untyped_zero
+; CHECK: xorps
+; CHECK: movaps
+define void @untyped_zero(<4 x i32>* %p) {
+entry:
+  store <4 x i32> zeroinitializer, <4 x i32>* %p, align 16
+  ret void
+}
diff --git a/llvm/test/CodeGen/X86/vec_zero_cse.ll b/llvm/test/CodeGen/X86/vec_zero_cse.ll
index 8aa5094..41ea024 100644
--- a/llvm/test/CodeGen/X86/vec_zero_cse.ll
+++ b/llvm/test/CodeGen/X86/vec_zero_cse.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pxor | count 1
+; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep xorps | count 1
 ; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pcmpeqd | count 1
 ; 64-bit stores here do not use MMX.
 
diff --git a/llvm/test/CodeGen/X86/xor.ll b/llvm/test/CodeGen/X86/xor.ll
index 178c59d..ddc4cab 100644
--- a/llvm/test/CodeGen/X86/xor.ll
+++ b/llvm/test/CodeGen/X86/xor.ll
@@ -8,7 +8,7 @@
 	ret <4 x i32> %tmp
         
 ; X32: test1:
-; X32:	pxor	%xmm0, %xmm0
+; X32:	xorps	%xmm0, %xmm0
 ; X32:	ret
 }