; Verify that strict FP operations are not rescheduled
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s

declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
declare float @llvm.sqrt.f32(float)
declare void @llvm.s390.sfpc(i32)

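; @llvm.experimental.constrained.sqrt.f32 is the constrained form of sqrt:
; its metadata operands state the assumed rounding mode and the required
; exception behavior.  @llvm.s390.sfpc maps to the SystemZ SFPC instruction,
; which sets the floating-point control (FPC) register.
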
; The basic assumption of all following tests is that on z13, we never
; want to see two square root instructions directly in a row, so the
; post-RA scheduler will always schedule something else in between
; whenever possible.

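; The tests pair each barrier kind with each exception behavior: plain
; stores (f1-f3), volatile stores (f4-f6), and an SFPC call (f7-f9), each
; against a non-strict, an fpexcept.ignore, and an fpexcept.strict sqrt.
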
; We can move any FP operation across a (normal) store.

define void @f1(float %f1, float %f2, float *%ptr1, float *%ptr2) {
; CHECK-LABEL: f1:
; CHECK: sqebr
; CHECK: ste
; CHECK: sqebr
; CHECK: ste
; CHECK: br %r14

  %sqrt1 = call float @llvm.sqrt.f32(float %f1)
  %sqrt2 = call float @llvm.sqrt.f32(float %f2)

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void
}

define void @f2(float %f1, float %f2, float *%ptr1, float *%ptr2) {
; CHECK-LABEL: f2:
; CHECK: sqebr
; CHECK: ste
; CHECK: sqebr
; CHECK: ste
; CHECK: br %r14

  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void
}

define void @f3(float %f1, float %f2, float *%ptr1, float *%ptr2) {
; CHECK-LABEL: f3:
; CHECK: sqebr
; CHECK: ste
; CHECK: sqebr
; CHECK: ste
; CHECK: br %r14

  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void
}


; We can move a non-strict FP operation or an fpexcept.ignore
; operation even across a volatile store, but not an fpexcept.strict
; operation.

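; A volatile store is an observable side effect, and an fpexcept.strict
; operation may raise a visible FP exception, so their relative order has
; to be preserved.  With fpexcept.ignore, exceptions are assumed to be
; unobserved, so the operation may still move past the volatile store.
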
define void @f4(float %f1, float %f2, float *%ptr1, float *%ptr2) {
; CHECK-LABEL: f4:
; CHECK: sqebr
; CHECK: ste
; CHECK: sqebr
; CHECK: ste
; CHECK: br %r14

  %sqrt1 = call float @llvm.sqrt.f32(float %f1)
  %sqrt2 = call float @llvm.sqrt.f32(float %f2)

  store volatile float %sqrt1, float *%ptr1
  store volatile float %sqrt2, float *%ptr2

  ret void
}

define void @f5(float %f1, float %f2, float *%ptr1, float *%ptr2) {
; CHECK-LABEL: f5:
; CHECK: sqebr
; CHECK: ste
; CHECK: sqebr
; CHECK: ste
; CHECK: br %r14

  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")

  store volatile float %sqrt1, float *%ptr1
  store volatile float %sqrt2, float *%ptr2

  ret void
}

define void @f6(float %f1, float %f2, float *%ptr1, float *%ptr2) {
; CHECK-LABEL: f6:
; CHECK: sqebr
; CHECK: sqebr
; CHECK: ste
; CHECK: ste
; CHECK: br %r14

  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")

  store volatile float %sqrt1, float *%ptr1
  store volatile float %sqrt2, float *%ptr2

  ret void
}


; No variant of FP operations can be scheduled across an SFPC.

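; SFPC replaces the FPC register, which holds the rounding mode and
; exception masks that every FP operation implicitly uses, so not even a
; non-strict sqrt may be reordered across it.
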
define void @f7(float %f1, float %f2, float *%ptr1, float *%ptr2) {
; CHECK-LABEL: f7:
; CHECK: sqebr
; CHECK: sqebr
; CHECK: ste
; CHECK: ste
; CHECK: br %r14

  %sqrt1 = call float @llvm.sqrt.f32(float %f1)
  %sqrt2 = call float @llvm.sqrt.f32(float %f2)

  call void @llvm.s390.sfpc(i32 0)

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void
}

define void @f8(float %f1, float %f2, float *%ptr1, float *%ptr2) {
; CHECK-LABEL: f8:
; CHECK: sqebr
; CHECK: sqebr
; CHECK: ste
; CHECK: ste
; CHECK: br %r14

  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")

  call void @llvm.s390.sfpc(i32 0)

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void
}

define void @f9(float %f1, float %f2, float *%ptr1, float *%ptr2) {
; CHECK-LABEL: f9:
; CHECK: sqebr
; CHECK: sqebr
; CHECK: ste
; CHECK: ste
; CHECK: br %r14

  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")

  call void @llvm.s390.sfpc(i32 0)

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void
}