Revert r122955. It seems using movups to lower memcpy can cause massive regressions (even on Nehalem) in edge cases. I also didn't see any real performance benefit.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@123015 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/memcpy.ll b/test/CodeGen/X86/memcpy.ll
index 4af93ad..72342cb 100644
--- a/test/CodeGen/X86/memcpy.ll
+++ b/test/CodeGen/X86/memcpy.ll
@@ -37,34 +37,26 @@
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A, i8* %B, i64 64, i32 1, i1 false)
   ret void
 ; LINUX: test3:
-; LINUX-NOT: memcpy
-; LINUX: movups
-; LINUX: movups
-; LINUX: movups
-; LINUX: movups
-; LINUX: movups
-; LINUX: movups
-; LINUX: movups
-; LINUX: movups
+; LINUX: memcpy
 
 ; DARWIN: test3:
 ; DARWIN-NOT: memcpy
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
-; DARWIN: movups
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
+; DARWIN: movq
 }
 
 ; Large constant memcpy's should be inlined when not optimizing for size.