; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc %s -O0 -mtriple=x86_64-unknown-unknown -mattr=+avx512f -o - | FileCheck %s

declare fastcc <38 x double> @test()
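; Regression test for PR34653: the fastcc callee returns a <38 x double>,
; which is too wide for registers, so (as the checks below show) it is
; returned indirectly through a stack slot whose address is passed in %rdi.
; The CHECK lines are autogenerated and record the -O0 AVX-512 codegen for
; extracting element 0 of that wide return value.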
define void @pr34653() {
; CHECK-LABEL: pr34653:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset %rbp, -16
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-512, %rsp # imm = 0xFE00
; CHECK-NEXT: subq $2048, %rsp # imm = 0x800
; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
; CHECK-NEXT: callq test
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: vmovaps %xmm0, %xmm1
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm2
; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm3
; CHECK-NEXT: vmovaps %xmm3, %xmm4
; CHECK-NEXT: vmovaps %xmm2, %xmm5
; CHECK-NEXT: vmovaps %xmm5, %xmm6
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm7
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm8
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm9
; CHECK-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm10
; CHECK-NEXT: vextractf32x4 $3, %zmm10, %xmm11
; CHECK-NEXT: vmovaps %xmm11, %xmm12
; CHECK-NEXT: vextractf32x4 $2, %zmm10, %xmm13
; CHECK-NEXT: vmovaps %xmm13, %xmm14
; CHECK-NEXT: vmovaps %xmm10, %xmm15
; CHECK-NEXT: vmovaps %xmm15, %xmm2
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $3, %zmm9, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $2, %zmm9, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm9, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $3, %zmm8, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $2, %zmm8, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm8, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $3, %zmm7, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vextractf32x4 $2, %zmm7, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm7, %xmm0
; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm11 = xmm11[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm13 = xmm13[1,0]
; CHECK-NEXT: # kill: %YMM10<def> %YMM10<kill> %ZMM10<kill>
; CHECK-NEXT: vextractf128 $1, %ymm10, %xmm10
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm10, %xmm0
; CHECK-NEXT: vpermilpd {{.*#+}} xmm15 = xmm15[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: # kill: %YMM9<def> %YMM9<kill> %ZMM9<kill>
; CHECK-NEXT: vextractf128 $1, %ymm9, %xmm9
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm9, %xmm0
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: # kill: %YMM8<def> %YMM8<kill> %ZMM8<kill>
; CHECK-NEXT: vextractf128 $1, %ymm8, %xmm8
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm8, %xmm0
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: # kill: %YMM7<def> %YMM7<kill> %ZMM7<kill>
; CHECK-NEXT: vextractf128 $1, %ymm7, %xmm7
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm7, %xmm0
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm8 = xmm8[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm7 = xmm7[1,0]
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd {{[0-9]+}}(%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: vmovsd %xmm8, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm13, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm1, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm14, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm2, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm4, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm9, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm10, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm15, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm11, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm3, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm6, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm5, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm12, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovsd %xmm7, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: movq %rbp, %rsp
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
  %v = call fastcc <38 x double> @test()
  %v.0 = extractelement <38 x double> %v, i32 0
  ret void
}