Implement initial memory alignment awareness for SSE instructions. Vector loads
and stores that have a specified alignment of less than 16 bytes now use
instructions that support misaligned memory references.

llvm-svn: 40015
diff --git a/llvm/test/CodeGen/X86/sse-align-0.ll b/llvm/test/CodeGen/X86/sse-align-0.ll
new file mode 100644
index 0000000..0b28067
--- /dev/null
+++ b/llvm/test/CodeGen/X86/sse-align-0.ll
@@ -0,0 +1,14 @@
+; RUN: llvm-as < %s | llc -march=x86-64 | not grep mov
+
+define <4 x float> @foo(<4 x float>* %p, <4 x float> %x)
+{
+  %t = load <4 x float>* %p                       ; no explicit align: ABI alignment (16) is assumed
+  %z = mul <4 x float> %t, %x                     ; expected to fold the load into the multiply's memory operand
+  ret <4 x float> %z
+}
+define <2 x double> @bar(<2 x double>* %p, <2 x double> %x)
+{
+  %t = load <2 x double>* %p                      ; no explicit align: ABI alignment (16) is assumed
+  %z = mul <2 x double> %t, %x                    ; expected to fold the load into the multiply's memory operand
+  ret <2 x double> %z
+}