- Handle a special scalar_to_vector case: splats. Use a native 128-bit
shuffle before inserting into a 256-bit vector (see the first sketch
after this list).
- Add AVX versions of movd/movq instructions
- Introduce a few COPY patterns to match insert_subvector instructions.
This turns a trivial insert_subvector instruction into a register copy,
coalescing the xmm into a ymm and avoiding the emission of one more
instruction (see the second sketch below).
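
A minimal IR sketch of the splat case, not taken from this commit's tests;
the function name is hypothetical, and the lowering described in the
comments is the intended strategy rather than checked compiler output:

; Splat a scalar float into all eight lanes of a 256-bit vector. The
; backend can now perform the splat inside a 128-bit register with a
; native shuffle and then insert that xmm into the high half of the ymm
; (e.g. via vinsertf128), instead of building the 256-bit vector element
; by element.
define <8 x float> @splat_f32(float %s) {
  %v = insertelement <8 x float> undef, float %s, i32 0
  %r = shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> zeroinitializer
  ret <8 x float> %r
}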
llvm-svn: 136002
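
A second hypothetical sketch for the insert_subvector COPY patterns:
widening a 128-bit value into the low half of a 256-bit vector selects to
an insert_subvector at index 0, which the new patterns can turn into a
plain subregister copy rather than an extra instruction. The function
name is again made up for illustration:

; Widen a <4 x float> into the low 128 bits of an <8 x float>. With the
; COPY patterns this should need no shuffle or insert instruction at all;
; the xmm register is simply coalesced into the corresponding ymm.
define <8 x float> @widen_low(<4 x float> %x) {
  %r = shufflevector <4 x float> %x, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
  ret <8 x float> %r
}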
diff --git a/llvm/test/CodeGen/X86/avx-256.ll b/llvm/test/CodeGen/X86/avx-256.ll
index 20d31e7..a6d1450 100644
--- a/llvm/test/CodeGen/X86/avx-256.ll
+++ b/llvm/test/CodeGen/X86/avx-256.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -march=x86 -mcpu=corei7 -mattr=avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
@x = common global <8 x float> zeroinitializer, align 32
@y = common global <4 x double> zeroinitializer, align 32
@@ -12,4 +12,3 @@
store <4 x double> zeroinitializer, <4 x double>* @y, align 32
ret void
}
-