; Test multiplication of two f64s, producing an f128 result.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare double @foo()

; Check register multiplication. "mxdbr %f0, %f2" is not valid from LLVM's
; point of view, because %f2 is the low register of the FP128 %f0. Pass the
; multiplier in %f4 instead.
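; (On z/Architecture an fp128 value occupies a floating-point register pair
; <N, N+2>, so a result in %f0 implicitly owns %f2 as its low half.)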
define void @f1(double %f1, double %dummy, double %f2, fp128 *%dst) {
; CHECK-LABEL: f1:
; CHECK: mxdbr %f0, %f4
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
  %f1x = fpext double %f1 to fp128
  %f2x = fpext double %f2 to fp128
  %res = fmul fp128 %f1x, %f2x
  store fp128 %res, fp128 *%dst
  ret void
}

; Check the low end of the MXDB range.
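; MXDB is an RXE-format instruction with a 12-bit unsigned displacement,
; so offset 0 is the lowest address it can encode.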
define void @f2(double %f1, double *%ptr, fp128 *%dst) {
; CHECK-LABEL: f2:
; CHECK: mxdb %f0, 0(%r2)
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
  %f2 = load double, double *%ptr
  %f1x = fpext double %f1 to fp128
  %f2x = fpext double %f2 to fp128
  %res = fmul fp128 %f1x, %f2x
  store fp128 %res, fp128 *%dst
  ret void
}

; Check the high end of the aligned MXDB range.
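; 511 * 8 == 4088 is the largest 8-byte-aligned offset that still fits in
; the 12-bit displacement field (maximum 4095).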
define void @f3(double %f1, double *%base, fp128 *%dst) {
; CHECK-LABEL: f3:
; CHECK: mxdb %f0, 4088(%r2)
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 511
  %f2 = load double, double *%ptr
  %f1x = fpext double %f1 to fp128
  %f2x = fpext double %f2 to fp128
  %res = fmul fp128 %f1x, %f2x
  store fp128 %res, fp128 *%dst
  ret void
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
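; 512 * 8 == 4096 no longer fits in the displacement field, so the base
; register must first be adjusted, here with AGHI.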
define void @f4(double %f1, double *%base, fp128 *%dst) {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mxdb %f0, 0(%r2)
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 512
  %f2 = load double, double *%ptr
  %f1x = fpext double %f1 to fp128
  %f2x = fpext double %f2 to fp128
  %res = fmul fp128 %f1x, %f2x
  store fp128 %res, fp128 *%dst
  ret void
}

; Check negative displacements, which also need separate address logic.
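; The displacement field is unsigned, so an offset of -8 cannot be encoded
; directly either.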
define void @f5(double %f1, double *%base, fp128 *%dst) {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: mxdb %f0, 0(%r2)
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 -1
  %f2 = load double, double *%ptr
  %f1x = fpext double %f1 to fp128
  %f2x = fpext double %f2 to fp128
  %res = fmul fp128 %f1x, %f2x
  store fp128 %res, fp128 *%dst
  ret void
}

; Check that MXDB allows indices.
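; The SLLG scales the i64 element index by 8 to get a byte offset, and the
; constant part of the address (100 * 8 == 800) is folded into the
; displacement of the base+index form.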
define void @f6(double %f1, double *%base, i64 %index, fp128 *%dst) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: mxdb %f0, 800(%r1,%r2)
; CHECK: std %f0, 0(%r4)
; CHECK: std %f2, 8(%r4)
; CHECK: br %r14
  %ptr1 = getelementptr double, double *%base, i64 %index
  %ptr2 = getelementptr double, double *%ptr1, i64 100
  %f2 = load double, double *%ptr2
  %f1x = fpext double %f1 to fp128
  %f2x = fpext double %f2 to fp128
  %res = fmul fp128 %f1x, %f2x
  store fp128 %res, fp128 *%dst
  ret void
}

; Check that multiplications of spilled values can use MXDB rather than MXDBR.
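; The call keeps more values live than there are call-saved FPRs, so some
; must be spilled to the stack; 160(%r15) is the first local slot, since the
; s390x ELF ABI reserves the low 160 bytes of the frame for the register
; save area.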
define double @f7(double *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK: mxdb %f0, 160(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr double, double *%ptr0, i64 2
  %ptr2 = getelementptr double, double *%ptr0, i64 4
  %ptr3 = getelementptr double, double *%ptr0, i64 6
  %ptr4 = getelementptr double, double *%ptr0, i64 8
  %ptr5 = getelementptr double, double *%ptr0, i64 10
  %ptr6 = getelementptr double, double *%ptr0, i64 12
  %ptr7 = getelementptr double, double *%ptr0, i64 14
  %ptr8 = getelementptr double, double *%ptr0, i64 16
  %ptr9 = getelementptr double, double *%ptr0, i64 18
  %ptr10 = getelementptr double, double *%ptr0, i64 20

  %val0 = load double, double *%ptr0
  %val1 = load double, double *%ptr1
  %val2 = load double, double *%ptr2
  %val3 = load double, double *%ptr3
  %val4 = load double, double *%ptr4
  %val5 = load double, double *%ptr5
  %val6 = load double, double *%ptr6
  %val7 = load double, double *%ptr7
  %val8 = load double, double *%ptr8
  %val9 = load double, double *%ptr9
  %val10 = load double, double *%ptr10

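  ; Double each loaded value so that all eleven results are live across the
  ; call and cannot simply be rematerialized from memory.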
  %frob0 = fadd double %val0, %val0
  %frob1 = fadd double %val1, %val1
  %frob2 = fadd double %val2, %val2
  %frob3 = fadd double %val3, %val3
  %frob4 = fadd double %val4, %val4
  %frob5 = fadd double %val5, %val5
  %frob6 = fadd double %val6, %val6
  %frob7 = fadd double %val7, %val7
  %frob8 = fadd double %val8, %val8
  %frob9 = fadd double %val9, %val9
  %frob10 = fadd double %val10, %val10

  store double %frob0, double *%ptr0
  store double %frob1, double *%ptr1
  store double %frob2, double *%ptr2
  store double %frob3, double *%ptr3
  store double %frob4, double *%ptr4
  store double %frob5, double *%ptr5
  store double %frob6, double *%ptr6
  store double %frob7, double *%ptr7
  store double %frob8, double *%ptr8
  store double %frob9, double *%ptr9
  store double %frob10, double *%ptr10

  %ret = call double @foo()

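  ; Each step widens the running double accumulator to fp128, multiplies it
  ; by one of the spilled values (reloaded via MXDB), scales the product by a
  ; constant, and truncates back to double.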
  %accext0 = fpext double %ret to fp128
  %ext0 = fpext double %frob0 to fp128
  %mul0 = fmul fp128 %accext0, %ext0
  %const0 = fpext double 1.01 to fp128
  %extra0 = fmul fp128 %mul0, %const0
  %trunc0 = fptrunc fp128 %extra0 to double

  %accext1 = fpext double %trunc0 to fp128
  %ext1 = fpext double %frob1 to fp128
  %mul1 = fmul fp128 %accext1, %ext1
  %const1 = fpext double 1.11 to fp128
  %extra1 = fmul fp128 %mul1, %const1
  %trunc1 = fptrunc fp128 %extra1 to double

  %accext2 = fpext double %trunc1 to fp128
  %ext2 = fpext double %frob2 to fp128
  %mul2 = fmul fp128 %accext2, %ext2
  %const2 = fpext double 1.21 to fp128
  %extra2 = fmul fp128 %mul2, %const2
  %trunc2 = fptrunc fp128 %extra2 to double

  %accext3 = fpext double %trunc2 to fp128
  %ext3 = fpext double %frob3 to fp128
  %mul3 = fmul fp128 %accext3, %ext3
  %const3 = fpext double 1.31 to fp128
  %extra3 = fmul fp128 %mul3, %const3
  %trunc3 = fptrunc fp128 %extra3 to double

  %accext4 = fpext double %trunc3 to fp128
  %ext4 = fpext double %frob4 to fp128
  %mul4 = fmul fp128 %accext4, %ext4
  %const4 = fpext double 1.41 to fp128
  %extra4 = fmul fp128 %mul4, %const4
  %trunc4 = fptrunc fp128 %extra4 to double

  %accext5 = fpext double %trunc4 to fp128
  %ext5 = fpext double %frob5 to fp128
  %mul5 = fmul fp128 %accext5, %ext5
  %const5 = fpext double 1.51 to fp128
  %extra5 = fmul fp128 %mul5, %const5
  %trunc5 = fptrunc fp128 %extra5 to double

  %accext6 = fpext double %trunc5 to fp128
  %ext6 = fpext double %frob6 to fp128
  %mul6 = fmul fp128 %accext6, %ext6
  %const6 = fpext double 1.61 to fp128
  %extra6 = fmul fp128 %mul6, %const6
  %trunc6 = fptrunc fp128 %extra6 to double

  %accext7 = fpext double %trunc6 to fp128
  %ext7 = fpext double %frob7 to fp128
  %mul7 = fmul fp128 %accext7, %ext7
  %const7 = fpext double 1.71 to fp128
  %extra7 = fmul fp128 %mul7, %const7
  %trunc7 = fptrunc fp128 %extra7 to double

  %accext8 = fpext double %trunc7 to fp128
  %ext8 = fpext double %frob8 to fp128
  %mul8 = fmul fp128 %accext8, %ext8
  %const8 = fpext double 1.81 to fp128
  %extra8 = fmul fp128 %mul8, %const8
  %trunc8 = fptrunc fp128 %extra8 to double

  %accext9 = fpext double %trunc8 to fp128
  %ext9 = fpext double %frob9 to fp128
  %mul9 = fmul fp128 %accext9, %ext9
  %const9 = fpext double 1.91 to fp128
  %extra9 = fmul fp128 %mul9, %const9
  %trunc9 = fptrunc fp128 %extra9 to double

  ret double %trunc9
}