On x86, if the only use of an i64 load is an i64 store, generate a pair of double load and store instead.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@66776 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/i64-mem-copy.ll b/test/CodeGen/X86/i64-mem-copy.ll
new file mode 100644
index 0000000..ce54011
--- /dev/null
+++ b/test/CodeGen/X86/i64-mem-copy.ll
@@ -0,0 +1,13 @@
+; RUN: llvm-as < %s | llc -march=x86-64 | grep {movq.*(%rsi), %rax}
+; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep {movsd.*(%eax),}
+
+; Uses movsd to load / store i64 values when sse2 is available.
+
+; rdar://6659858
+
+define void @foo(i64* %x, i64* %y) nounwind {
+entry:
+ %tmp1 = load i64* %y, align 8 ; <i64> [#uses=1]
+ store i64 %tmp1, i64* %x, align 8
+ ret void
+}