; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc -mtriple=x86_64-pc-linux -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
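
; load_factorf64_4: factor-4 interleaved load. Four stride-4 <4 x double>
; fields are deinterleaved from a single <16 x double> load and then summed;
; the checks expect the deinterleave to stay in-register via
; vinsertf128/vperm2f128 and vunpck[lh]pd.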
define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
; AVX-LABEL: load_factorf64_4:
; AVX:       # BB#0:
; AVX-NEXT:    vmovupd (%rdi), %ymm0
; AVX-NEXT:    vmovupd 32(%rdi), %ymm1
; AVX-NEXT:    vmovupd 64(%rdi), %ymm2
; AVX-NEXT:    vmovupd 96(%rdi), %ymm3
; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT:    vhaddpd %ymm5, %ymm4, %ymm1
; AVX-NEXT:    vaddpd %ymm2, %ymm1, %ymm1
; AVX-NEXT:    vaddpd %ymm0, %ymm1, %ymm0
; AVX-NEXT:    retq
  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.v1 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %strided.v2 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  %add1 = fadd <4 x double> %strided.v0, %strided.v1
  %add2 = fadd <4 x double> %add1, %strided.v2
  %add3 = fadd <4 x double> %add2, %strided.v3
  ret <4 x double> %add3
}
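
; load_factorf64_2: same interleaved layout, but only two of the four stride-4
; fields (masks <0,4,8,12> and <3,7,11,15>) are extracted and multiplied.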
define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
; AVX-LABEL: load_factorf64_2:
; AVX:       # BB#0:
; AVX-NEXT:    vmovupd (%rdi), %ymm0
; AVX-NEXT:    vmovupd 32(%rdi), %ymm1
; AVX-NEXT:    vmovupd 64(%rdi), %ymm2
; AVX-NEXT:    vmovupd 96(%rdi), %ymm3
; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX-NEXT:    vmulpd %ymm0, %ymm2, %ymm0
; AVX-NEXT:    retq
  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  %mul = fmul <4 x double> %strided.v0, %strided.v3
  ret <4 x double> %mul
}
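
; load_factorf64_1: both shufflevectors use the same <0,4,8,12> mask, so the
; multiply squares a single deinterleaved field; the checks accordingly expect
; one vunpcklpd and a vmulpd with a repeated source register.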
define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
; AVX1-LABEL: load_factorf64_1:
; AVX1:       # BB#0:
; AVX1-NEXT:    vmovups (%rdi), %ymm0
; AVX1-NEXT:    vmovups 32(%rdi), %ymm1
; AVX1-NEXT:    vmovups 64(%rdi), %ymm2
; AVX1-NEXT:    vmovups 96(%rdi), %ymm3
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1-NEXT:    vmulpd %ymm0, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: load_factorf64_1:
; AVX2:       # BB#0:
; AVX2-NEXT:    vmovupd (%rdi), %ymm0
; AVX2-NEXT:    vmovupd 32(%rdi), %ymm1
; AVX2-NEXT:    vmovupd 64(%rdi), %ymm2
; AVX2-NEXT:    vmovupd 96(%rdi), %ymm3
; AVX2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT:    vmulpd %ymm0, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %mul = fmul <4 x double> %strided.v0, %strided.v3
  ret <4 x double> %mul
}
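
; load_factori64_4: integer variant of the factor-4 case. AVX2 keeps the adds
; in 256-bit integer ops (vpaddq %ymm); AVX1 has no 256-bit integer add, so
; its lowering extracts 128-bit halves and adds them with xmm vpaddq.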
define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
; AVX1-LABEL: load_factori64_4:
; AVX1:       # BB#0:
; AVX1-NEXT:    vmovupd (%rdi), %ymm0
; AVX1-NEXT:    vmovupd 32(%rdi), %ymm1
; AVX1-NEXT:    vmovupd 64(%rdi), %ymm2
; AVX1-NEXT:    vmovupd 96(%rdi), %ymm3
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm4
; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
; AVX1-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
; AVX1-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: load_factori64_4:
; AVX2:       # BB#0:
; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
; AVX2-NEXT:    vmovdqu 32(%rdi), %ymm1
; AVX2-NEXT:    vmovdqu 64(%rdi), %ymm2
; AVX2-NEXT:    vmovdqu 96(%rdi), %ymm3
; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm4
; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm5
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX2-NEXT:    vpaddq %ymm3, %ymm4, %ymm1
; AVX2-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    retq
  %wide.vec = load <16 x i64>, <16 x i64>* %ptr, align 16
  %strided.v0 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.v1 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %strided.v2 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %strided.v3 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
  %add1 = add <4 x i64> %strided.v0, %strided.v1
  %add2 = add <4 x i64> %add1, %strided.v2
  %add3 = add <4 x i64> %add2, %strided.v3
  ret <4 x i64> %add3
}