; Test multiplication of two f32s, producing an f64 result.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare float @foo()

; Check register multiplication.
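; MDEBR is the register form: it multiplies two f32 register operands and
; leaves the widened f64 product in the first operand.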
define double @f1(float %f1, float %f2) {
; CHECK-LABEL: f1:
; CHECK: mdebr %f0, %f2
; CHECK: br %r14
  %f1x = fpext float %f1 to double
  %f2x = fpext float %f2 to double
  %res = fmul double %f1x, %f2x
  ret double %res
}

; Check the low end of the MDEB range.
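; MDEB takes a memory operand with a 12-bit unsigned displacement, so
; offset 0 is the lowest value the displacement field can encode.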
define double @f2(float %f1, float *%ptr) {
; CHECK-LABEL: f2:
; CHECK: mdeb %f0, 0(%r2)
; CHECK: br %r14
  %f2 = load float, float *%ptr
  %f1x = fpext float %f1 to double
  %f2x = fpext float %f2 to double
  %res = fmul double %f1x, %f2x
  ret double %res
}

; Check the high end of the aligned MDEB range.
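; 1023 * 4 = 4092, the largest 4-byte-aligned offset that fits in the
; 12-bit displacement field (maximum 4095).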
define double @f3(float %f1, float *%base) {
; CHECK-LABEL: f3:
; CHECK: mdeb %f0, 4092(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 1023
  %f2 = load float, float *%ptr
  %f1x = fpext float %f1 to double
  %f2x = fpext float %f2 to double
  %res = fmul double %f1x, %f2x
  ret double %res
}

; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
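; 1024 * 4 = 4096 no longer fits in the 12-bit displacement field, so the
; base register must be adjusted first.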
define double @f4(float %f1, float *%base) {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mdeb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 1024
  %f2 = load float, float *%ptr
  %f1x = fpext float %f1 to double
  %f2x = fpext float %f2 to double
  %res = fmul double %f1x, %f2x
  ret double %res
}

; Check negative displacements, which also need separate address logic.
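; The displacement field is unsigned, so -4 cannot be encoded directly.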
define double @f5(float %f1, float *%base) {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: mdeb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 -1
  %f2 = load float, float *%ptr
  %f1x = fpext float %f1 to double
  %f2x = fpext float %f2 to double
  %res = fmul double %f1x, %f2x
  ret double %res
}

; Check that MDEB allows indices.
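; The index register holds a byte offset, so %index is scaled by the
; element size (SLLG by 2); the constant part, 100 * 4 = 400, goes into
; the displacement.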
define double @f6(float %f1, float *%base, i64 %index) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: mdeb %f0, 400(%r1,%r2)
; CHECK: br %r14
  %ptr1 = getelementptr float, float *%base, i64 %index
  %ptr2 = getelementptr float, float *%ptr1, i64 100
  %f2 = load float, float *%ptr2
  %f1x = fpext float %f1 to double
  %f2x = fpext float %f2 to double
  %res = fmul double %f1x, %f2x
  ret double %res
}

; Check that multiplications of spilled values can use MDEB rather than MDEBR.
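; More values are kept live across the call to @foo than there are
; call-saved FPRs, so some must be spilled to the stack; the final
; multiplications can then fold a reload into MDEB.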
define float @f7(float *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK: mdeb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr float, float *%ptr0, i64 2
  %ptr2 = getelementptr float, float *%ptr0, i64 4
  %ptr3 = getelementptr float, float *%ptr0, i64 6
  %ptr4 = getelementptr float, float *%ptr0, i64 8
  %ptr5 = getelementptr float, float *%ptr0, i64 10
  %ptr6 = getelementptr float, float *%ptr0, i64 12
  %ptr7 = getelementptr float, float *%ptr0, i64 14
  %ptr8 = getelementptr float, float *%ptr0, i64 16
  %ptr9 = getelementptr float, float *%ptr0, i64 18
  %ptr10 = getelementptr float, float *%ptr0, i64 20

  %val0 = load float, float *%ptr0
  %val1 = load float, float *%ptr1
  %val2 = load float, float *%ptr2
  %val3 = load float, float *%ptr3
  %val4 = load float, float *%ptr4
  %val5 = load float, float *%ptr5
  %val6 = load float, float *%ptr6
  %val7 = load float, float *%ptr7
  %val8 = load float, float *%ptr8
  %val9 = load float, float *%ptr9
  %val10 = load float, float *%ptr10

  %frob0 = fadd float %val0, %val0
  %frob1 = fadd float %val1, %val1
  %frob2 = fadd float %val2, %val2
  %frob3 = fadd float %val3, %val3
  %frob4 = fadd float %val4, %val4
  %frob5 = fadd float %val5, %val5
  %frob6 = fadd float %val6, %val6
  %frob7 = fadd float %val7, %val7
  %frob8 = fadd float %val8, %val8
  %frob9 = fadd float %val9, %val9
  %frob10 = fadd float %val9, %val10

  store float %frob0, float *%ptr0
  store float %frob1, float *%ptr1
  store float %frob2, float *%ptr2
  store float %frob3, float *%ptr3
  store float %frob4, float *%ptr4
  store float %frob5, float *%ptr5
  store float %frob6, float *%ptr6
  store float %frob7, float *%ptr7
  store float %frob8, float *%ptr8
  store float %frob9, float *%ptr9
  store float %frob10, float *%ptr10

  %ret = call float @foo()

  %accext0 = fpext float %ret to double
  %ext0 = fpext float %frob0 to double
  %mul0 = fmul double %accext0, %ext0
  %extra0 = fmul double %mul0, 1.01
  %trunc0 = fptrunc double %extra0 to float

  %accext1 = fpext float %trunc0 to double
  %ext1 = fpext float %frob1 to double
  %mul1 = fmul double %accext1, %ext1
  %extra1 = fmul double %mul1, 1.11
  %trunc1 = fptrunc double %extra1 to float

  %accext2 = fpext float %trunc1 to double
  %ext2 = fpext float %frob2 to double
  %mul2 = fmul double %accext2, %ext2
  %extra2 = fmul double %mul2, 1.21
  %trunc2 = fptrunc double %extra2 to float

  %accext3 = fpext float %trunc2 to double
  %ext3 = fpext float %frob3 to double
  %mul3 = fmul double %accext3, %ext3
  %extra3 = fmul double %mul3, 1.31
  %trunc3 = fptrunc double %extra3 to float

  %accext4 = fpext float %trunc3 to double
  %ext4 = fpext float %frob4 to double
  %mul4 = fmul double %accext4, %ext4
  %extra4 = fmul double %mul4, 1.41
  %trunc4 = fptrunc double %extra4 to float

  %accext5 = fpext float %trunc4 to double
  %ext5 = fpext float %frob5 to double
  %mul5 = fmul double %accext5, %ext5
  %extra5 = fmul double %mul5, 1.51
  %trunc5 = fptrunc double %extra5 to float

  %accext6 = fpext float %trunc5 to double
  %ext6 = fpext float %frob6 to double
  %mul6 = fmul double %accext6, %ext6
  %extra6 = fmul double %mul6, 1.61
  %trunc6 = fptrunc double %extra6 to float

  %accext7 = fpext float %trunc6 to double
  %ext7 = fpext float %frob7 to double
  %mul7 = fmul double %accext7, %ext7
  %extra7 = fmul double %mul7, 1.71
  %trunc7 = fptrunc double %extra7 to float

  %accext8 = fpext float %trunc7 to double
  %ext8 = fpext float %frob8 to double
  %mul8 = fmul double %accext8, %ext8
  %extra8 = fmul double %mul8, 1.81
  %trunc8 = fptrunc double %extra8 to float

  %accext9 = fpext float %trunc8 to double
  %ext9 = fpext float %frob9 to double
  %mul9 = fmul double %accext9, %ext9
  %extra9 = fmul double %mul9, 1.91
  %trunc9 = fptrunc double %extra9 to float

  ret float %trunc9
}