; Like frame-02.ll, but with long doubles rather than floats. Some of the
; cases are slightly different because we need to allocate pairs of FPRs:
; on z/Architecture an fp128 value occupies the register pair %fN and %f(N+2).
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; This function should require all FPRs, but no other spill slots.
; We need to save and restore 8 of the 16 FPRs, so the frame size
; should be exactly 160 + 8 * 8 = 224. The CFA offset is 160
; (the caller-allocated part of the frame) + 224 = 384.
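;
; As a sketch (not part of the FileCheck patterns below), the expected f1
; frame, relative to the new value of %r15, is:
;
;   0..159    the ABI-reserved 160-byte area (f1 makes no calls)
;   160..223  eight 8-byte save slots, %f15 at 160(%r15) up to %f8 at 216(%r15)
;   224       the incoming %r15; the CFA sits 160 bytes above that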
define void @f1(fp128 *%ptr) {
; CHECK-LABEL: f1:
; CHECK: aghi %r15, -224
; CHECK: .cfi_def_cfa_offset 384
; CHECK: std %f8, 216(%r15)
; CHECK: std %f9, 208(%r15)
; CHECK: std %f10, 200(%r15)
; CHECK: std %f11, 192(%r15)
; CHECK: std %f12, 184(%r15)
; CHECK: std %f13, 176(%r15)
; CHECK: std %f14, 168(%r15)
; CHECK: std %f15, 160(%r15)
; CHECK: .cfi_offset %f8, -168
; CHECK: .cfi_offset %f9, -176
; CHECK: .cfi_offset %f10, -184
; CHECK: .cfi_offset %f11, -192
; CHECK: .cfi_offset %f12, -200
; CHECK: .cfi_offset %f13, -208
; CHECK: .cfi_offset %f14, -216
; CHECK: .cfi_offset %f15, -224
; ...main function body...
; CHECK: ld %f8, 216(%r15)
; CHECK: ld %f9, 208(%r15)
; CHECK: ld %f10, 200(%r15)
; CHECK: ld %f11, 192(%r15)
; CHECK: ld %f12, 184(%r15)
; CHECK: ld %f13, 176(%r15)
; CHECK: ld %f14, 168(%r15)
; CHECK: ld %f15, 160(%r15)
; CHECK: aghi %r15, 224
; CHECK: br %r14
  %l0 = load volatile fp128 *%ptr
  %l1 = load volatile fp128 *%ptr
  %l4 = load volatile fp128 *%ptr
  %l5 = load volatile fp128 *%ptr
  %l8 = load volatile fp128 *%ptr
  %l9 = load volatile fp128 *%ptr
  %l12 = load volatile fp128 *%ptr
  %l13 = load volatile fp128 *%ptr
  %add0 = fadd fp128 %l0, %l0
  %add1 = fadd fp128 %l1, %add0
  %add4 = fadd fp128 %l4, %add1
  %add5 = fadd fp128 %l5, %add4
  %add8 = fadd fp128 %l8, %add5
  %add9 = fadd fp128 %l9, %add8
  %add12 = fadd fp128 %l12, %add9
  %add13 = fadd fp128 %l13, %add12
  store volatile fp128 %add0, fp128 *%ptr
  store volatile fp128 %add1, fp128 *%ptr
  store volatile fp128 %add4, fp128 *%ptr
  store volatile fp128 %add5, fp128 *%ptr
  store volatile fp128 %add8, fp128 *%ptr
  store volatile fp128 %add9, fp128 *%ptr
  store volatile fp128 %add12, fp128 *%ptr
  store volatile fp128 %add13, fp128 *%ptr
  ret void
}

; Like f1, but requires one fewer FPR pair. We allocate in numerical order,
; so %f13+%f15 is the pair that gets dropped. The frame size is therefore
; 160 + 6 * 8 = 208 and the CFA offset is 160 + 208 = 368.
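;
; As a sketch, the seven live fp128 values need the four call-clobbered
; pairs plus three call-saved pairs; taken in numerical order those are
; %f8+%f10, %f9+%f11 and %f12+%f14, which leaves %f13+%f15 untouched.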
define void @f2(fp128 *%ptr) {
; CHECK-LABEL: f2:
; CHECK: aghi %r15, -208
; CHECK: .cfi_def_cfa_offset 368
; CHECK: std %f8, 200(%r15)
; CHECK: std %f9, 192(%r15)
; CHECK: std %f10, 184(%r15)
; CHECK: std %f11, 176(%r15)
; CHECK: std %f12, 168(%r15)
; CHECK: std %f14, 160(%r15)
; CHECK: .cfi_offset %f8, -168
; CHECK: .cfi_offset %f9, -176
; CHECK: .cfi_offset %f10, -184
; CHECK: .cfi_offset %f11, -192
; CHECK: .cfi_offset %f12, -200
; CHECK: .cfi_offset %f14, -208
; CHECK-NOT: %f13
; CHECK-NOT: %f15
; ...main function body...
; CHECK: ld %f8, 200(%r15)
; CHECK: ld %f9, 192(%r15)
; CHECK: ld %f10, 184(%r15)
; CHECK: ld %f11, 176(%r15)
; CHECK: ld %f12, 168(%r15)
; CHECK: ld %f14, 160(%r15)
; CHECK: aghi %r15, 208
; CHECK: br %r14
  %l0 = load volatile fp128 *%ptr
  %l1 = load volatile fp128 *%ptr
  %l4 = load volatile fp128 *%ptr
  %l5 = load volatile fp128 *%ptr
  %l8 = load volatile fp128 *%ptr
  %l9 = load volatile fp128 *%ptr
  %l12 = load volatile fp128 *%ptr
  %add0 = fadd fp128 %l0, %l0
  %add1 = fadd fp128 %l1, %add0
  %add4 = fadd fp128 %l4, %add1
  %add5 = fadd fp128 %l5, %add4
  %add8 = fadd fp128 %l8, %add5
  %add9 = fadd fp128 %l9, %add8
  %add12 = fadd fp128 %l12, %add9
  store volatile fp128 %add0, fp128 *%ptr
  store volatile fp128 %add1, fp128 *%ptr
  store volatile fp128 %add4, fp128 *%ptr
  store volatile fp128 %add5, fp128 *%ptr
  store volatile fp128 %add8, fp128 *%ptr
  store volatile fp128 %add9, fp128 *%ptr
  store volatile fp128 %add12, fp128 *%ptr
  ret void
}

; Like f1, but requires only one call-saved FPR pair. We allocate in
; numerical order so the pair should be %f8+%f10. The frame size is
; 160 + 2 * 8 = 176 and the CFA offset is 160 + 176 = 336.
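;
; As a sketch, the fp128 register pairs on z/Architecture are %f0+%f2,
; %f1+%f3, %f4+%f6, %f5+%f7, %f8+%f10, %f9+%f11, %f12+%f14 and %f13+%f15.
; The four call-clobbered pairs hold %l0, %l1, %l4 and %l5, so the fifth
; live value %l8 needs the first call-saved pair, %f8+%f10.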
123define void @f3(fp128 *%ptr) {
Stephen Lind24ab202013-07-14 06:24:09 +0000124; CHECK-LABEL: f3:
Ulrich Weigand9e3577f2013-05-06 16:17:29 +0000125; CHECK: aghi %r15, -176
126; CHECK: .cfi_def_cfa_offset 336
127; CHECK: std %f8, 168(%r15)
128; CHECK: std %f10, 160(%r15)
129; CHECK: .cfi_offset %f8, -168
130; CHECK: .cfi_offset %f10, -176
131; CHECK-NOT: %f9
132; CHECK-NOT: %f11
133; CHECK-NOT: %f12
134; CHECK-NOT: %f13
135; CHECK-NOT: %f14
136; CHECK-NOT: %f15
137; ...main function body...
138; CHECK: ld %f8, 168(%r15)
139; CHECK: ld %f10, 160(%r15)
140; CHECK: aghi %r15, 176
141; CHECK: br %r14
  %l0 = load volatile fp128 *%ptr
  %l1 = load volatile fp128 *%ptr
  %l4 = load volatile fp128 *%ptr
  %l5 = load volatile fp128 *%ptr
  %l8 = load volatile fp128 *%ptr
  %add0 = fadd fp128 %l0, %l0
  %add1 = fadd fp128 %l1, %add0
  %add4 = fadd fp128 %l4, %add1
  %add5 = fadd fp128 %l5, %add4
  %add8 = fadd fp128 %l8, %add5
  store volatile fp128 %add0, fp128 *%ptr
  store volatile fp128 %add1, fp128 *%ptr
  store volatile fp128 %add4, fp128 *%ptr
  store volatile fp128 %add5, fp128 *%ptr
  store volatile fp128 %add8, fp128 *%ptr
  ret void
}

; This function should use all call-clobbered FPRs but no call-saved ones.
; It shouldn't need to create a frame.
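;
; As a sketch, at most four fp128 values are live at once, and they fit in
; the four call-clobbered pairs %f0+%f2, %f1+%f3, %f4+%f6 and %f5+%f7, so
; no call-saved FPR has to be spilled and no frame is required.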
define void @f4(fp128 *%ptr) {
; CHECK-LABEL: f4:
; CHECK-NOT: %r15
; CHECK-NOT: %f8
; CHECK-NOT: %f9
; CHECK-NOT: %f10
; CHECK-NOT: %f11
; CHECK-NOT: %f12
; CHECK-NOT: %f13
; CHECK-NOT: %f14
; CHECK-NOT: %f15
; CHECK: br %r14
  %l0 = load volatile fp128 *%ptr
  %l1 = load volatile fp128 *%ptr
  %l4 = load volatile fp128 *%ptr
  %l5 = load volatile fp128 *%ptr
  %add0 = fadd fp128 %l0, %l0
  %add1 = fadd fp128 %l1, %add0
  %add4 = fadd fp128 %l4, %add1
  %add5 = fadd fp128 %l5, %add4
  store volatile fp128 %add0, fp128 *%ptr
  store volatile fp128 %add1, fp128 *%ptr
  store volatile fp128 %add4, fp128 *%ptr
  store volatile fp128 %add5, fp128 *%ptr
  ret void
}