; Like frame-02.ll, but with doubles rather than floats. Internally this
; uses a different register class, but the set of saved and restored
; registers should be the same.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; This function should require all FPRs, but no other spill slots.
; We need to save and restore 8 of the 16 FPRs, so the frame size
; should be exactly 160 + 8 * 8 = 224. The CFA offset is 160
; (the caller-allocated part of the frame) + 224 = 384.
define void @f1(double *%ptr) {
; CHECK-LABEL: f1:
; CHECK: aghi %r15, -224
; CHECK: .cfi_def_cfa_offset 384
; CHECK: std %f8, 216(%r15)
; CHECK: std %f9, 208(%r15)
; CHECK: std %f10, 200(%r15)
; CHECK: std %f11, 192(%r15)
; CHECK: std %f12, 184(%r15)
; CHECK: std %f13, 176(%r15)
; CHECK: std %f14, 168(%r15)
; CHECK: std %f15, 160(%r15)
; CHECK: .cfi_offset %f8, -168
; CHECK: .cfi_offset %f9, -176
; CHECK: .cfi_offset %f10, -184
; CHECK: .cfi_offset %f11, -192
; CHECK: .cfi_offset %f12, -200
; CHECK: .cfi_offset %f13, -208
; CHECK: .cfi_offset %f14, -216
; CHECK: .cfi_offset %f15, -224
; ...main function body...
; CHECK: ld %f8, 216(%r15)
; CHECK: ld %f9, 208(%r15)
; CHECK: ld %f10, 200(%r15)
; CHECK: ld %f11, 192(%r15)
; CHECK: ld %f12, 184(%r15)
; CHECK: ld %f13, 176(%r15)
; CHECK: ld %f14, 168(%r15)
; CHECK: ld %f15, 160(%r15)
; CHECK: aghi %r15, 224
; CHECK: br %r14
  %l0 = load volatile double *%ptr
  %l1 = load volatile double *%ptr
  %l2 = load volatile double *%ptr
  %l3 = load volatile double *%ptr
  %l4 = load volatile double *%ptr
  %l5 = load volatile double *%ptr
  %l6 = load volatile double *%ptr
  %l7 = load volatile double *%ptr
  %l8 = load volatile double *%ptr
  %l9 = load volatile double *%ptr
  %l10 = load volatile double *%ptr
  %l11 = load volatile double *%ptr
  %l12 = load volatile double *%ptr
  %l13 = load volatile double *%ptr
  %l14 = load volatile double *%ptr
  %l15 = load volatile double *%ptr
  %add0 = fadd double %l0, %l0
  %add1 = fadd double %l1, %add0
  %add2 = fadd double %l2, %add1
  %add3 = fadd double %l3, %add2
  %add4 = fadd double %l4, %add3
  %add5 = fadd double %l5, %add4
  %add6 = fadd double %l6, %add5
  %add7 = fadd double %l7, %add6
  %add8 = fadd double %l8, %add7
  %add9 = fadd double %l9, %add8
  %add10 = fadd double %l10, %add9
  %add11 = fadd double %l11, %add10
  %add12 = fadd double %l12, %add11
  %add13 = fadd double %l13, %add12
  %add14 = fadd double %l14, %add13
  %add15 = fadd double %l15, %add14
  store volatile double %add0, double *%ptr
  store volatile double %add1, double *%ptr
  store volatile double %add2, double *%ptr
  store volatile double %add3, double *%ptr
  store volatile double %add4, double *%ptr
  store volatile double %add5, double *%ptr
  store volatile double %add6, double *%ptr
  store volatile double %add7, double *%ptr
  store volatile double %add8, double *%ptr
  store volatile double %add9, double *%ptr
  store volatile double %add10, double *%ptr
  store volatile double %add11, double *%ptr
  store volatile double %add12, double *%ptr
  store volatile double %add13, double *%ptr
  store volatile double %add14, double *%ptr
  store volatile double %add15, double *%ptr
  ret void
}

; Like f1, but requires one fewer FPR. We allocate in numerical order,
; so %f15 is the one that gets dropped.
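; The frame size is therefore 160 + 7 * 8 = 216 and the CFA offset is
; 160 + 216 = 376.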
define void @f2(double *%ptr) {
; CHECK-LABEL: f2:
; CHECK: aghi %r15, -216
; CHECK: .cfi_def_cfa_offset 376
; CHECK: std %f8, 208(%r15)
; CHECK: std %f9, 200(%r15)
; CHECK: std %f10, 192(%r15)
; CHECK: std %f11, 184(%r15)
; CHECK: std %f12, 176(%r15)
; CHECK: std %f13, 168(%r15)
; CHECK: std %f14, 160(%r15)
; CHECK: .cfi_offset %f8, -168
; CHECK: .cfi_offset %f9, -176
; CHECK: .cfi_offset %f10, -184
; CHECK: .cfi_offset %f11, -192
; CHECK: .cfi_offset %f12, -200
; CHECK: .cfi_offset %f13, -208
; CHECK: .cfi_offset %f14, -216
; CHECK-NOT: %f15
; ...main function body...
; CHECK: ld %f8, 208(%r15)
; CHECK: ld %f9, 200(%r15)
; CHECK: ld %f10, 192(%r15)
; CHECK: ld %f11, 184(%r15)
; CHECK: ld %f12, 176(%r15)
; CHECK: ld %f13, 168(%r15)
; CHECK: ld %f14, 160(%r15)
; CHECK: aghi %r15, 216
; CHECK: br %r14
  %l0 = load volatile double *%ptr
  %l1 = load volatile double *%ptr
  %l2 = load volatile double *%ptr
  %l3 = load volatile double *%ptr
  %l4 = load volatile double *%ptr
  %l5 = load volatile double *%ptr
  %l6 = load volatile double *%ptr
  %l7 = load volatile double *%ptr
  %l8 = load volatile double *%ptr
  %l9 = load volatile double *%ptr
  %l10 = load volatile double *%ptr
  %l11 = load volatile double *%ptr
  %l12 = load volatile double *%ptr
  %l13 = load volatile double *%ptr
  %l14 = load volatile double *%ptr
  %add0 = fadd double %l0, %l0
  %add1 = fadd double %l1, %add0
  %add2 = fadd double %l2, %add1
  %add3 = fadd double %l3, %add2
  %add4 = fadd double %l4, %add3
  %add5 = fadd double %l5, %add4
  %add6 = fadd double %l6, %add5
  %add7 = fadd double %l7, %add6
  %add8 = fadd double %l8, %add7
  %add9 = fadd double %l9, %add8
  %add10 = fadd double %l10, %add9
  %add11 = fadd double %l11, %add10
  %add12 = fadd double %l12, %add11
  %add13 = fadd double %l13, %add12
  %add14 = fadd double %l14, %add13
  store volatile double %add0, double *%ptr
  store volatile double %add1, double *%ptr
  store volatile double %add2, double *%ptr
  store volatile double %add3, double *%ptr
  store volatile double %add4, double *%ptr
  store volatile double %add5, double *%ptr
  store volatile double %add6, double *%ptr
  store volatile double %add7, double *%ptr
  store volatile double %add8, double *%ptr
  store volatile double %add9, double *%ptr
  store volatile double %add10, double *%ptr
  store volatile double %add11, double *%ptr
  store volatile double %add12, double *%ptr
  store volatile double %add13, double *%ptr
  store volatile double %add14, double *%ptr
  ret void
}

; Like f1, but should require only one call-saved FPR.
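; The frame size is therefore 160 + 8 = 168 and the CFA offset is
; 160 + 168 = 328.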
define void @f3(double *%ptr) {
; CHECK-LABEL: f3:
; CHECK: aghi %r15, -168
; CHECK: .cfi_def_cfa_offset 328
; CHECK: std %f8, 160(%r15)
; CHECK: .cfi_offset %f8, -168
; CHECK-NOT: %f9
; CHECK-NOT: %f10
; CHECK-NOT: %f11
; CHECK-NOT: %f12
; CHECK-NOT: %f13
; CHECK-NOT: %f14
; CHECK-NOT: %f15
; ...main function body...
; CHECK: ld %f8, 160(%r15)
; CHECK: aghi %r15, 168
; CHECK: br %r14
  %l0 = load volatile double *%ptr
  %l1 = load volatile double *%ptr
  %l2 = load volatile double *%ptr
  %l3 = load volatile double *%ptr
  %l4 = load volatile double *%ptr
  %l5 = load volatile double *%ptr
  %l6 = load volatile double *%ptr
  %l7 = load volatile double *%ptr
  %l8 = load volatile double *%ptr
  %add0 = fadd double %l0, %l0
  %add1 = fadd double %l1, %add0
  %add2 = fadd double %l2, %add1
  %add3 = fadd double %l3, %add2
  %add4 = fadd double %l4, %add3
  %add5 = fadd double %l5, %add4
  %add6 = fadd double %l6, %add5
  %add7 = fadd double %l7, %add6
  %add8 = fadd double %l8, %add7
  store volatile double %add0, double *%ptr
  store volatile double %add1, double *%ptr
  store volatile double %add2, double *%ptr
  store volatile double %add3, double *%ptr
  store volatile double %add4, double *%ptr
  store volatile double %add5, double *%ptr
  store volatile double %add6, double *%ptr
  store volatile double %add7, double *%ptr
  store volatile double %add8, double *%ptr
  ret void
}

; This function should use all call-clobbered FPRs but no call-saved ones.
; It shouldn't need to create a frame.
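; The eight values fit in the call-clobbered registers %f0-%f7, so no
; save slots are needed and %r15 never has to be adjusted.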
define void @f4(double *%ptr) {
; CHECK-LABEL: f4:
; CHECK-NOT: %r15
; CHECK-NOT: %f8
; CHECK-NOT: %f9
; CHECK-NOT: %f10
; CHECK-NOT: %f11
; CHECK-NOT: %f12
; CHECK-NOT: %f13
; CHECK-NOT: %f14
; CHECK-NOT: %f15
; CHECK: br %r14
  %l0 = load volatile double *%ptr
  %l1 = load volatile double *%ptr
  %l2 = load volatile double *%ptr
  %l3 = load volatile double *%ptr
  %l4 = load volatile double *%ptr
  %l5 = load volatile double *%ptr
  %l6 = load volatile double *%ptr
  %l7 = load volatile double *%ptr
  %add0 = fadd double %l0, %l0
  %add1 = fadd double %l1, %add0
  %add2 = fadd double %l2, %add1
  %add3 = fadd double %l3, %add2
  %add4 = fadd double %l4, %add3
  %add5 = fadd double %l5, %add4
  %add6 = fadd double %l6, %add5
  %add7 = fadd double %l7, %add6
  store volatile double %add0, double *%ptr
  store volatile double %add1, double *%ptr
  store volatile double %add2, double *%ptr
  store volatile double %add3, double *%ptr
  store volatile double %add4, double *%ptr
  store volatile double %add5, double *%ptr
  store volatile double %add6, double *%ptr
  store volatile double %add7, double *%ptr
  ret void
}