; Test moves between FPRs and GPRs. The 32-bit cases test the z10
; implementation, which has no high-word support.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s

declare i64 @foo()
declare double @bar()
@dptr = external global double
@iptr = external global i64

; Test 32-bit moves from GPRs to FPRs. The GPR must be moved into the high
; 32 bits of the FPR.
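; The SLLG is needed because LDGR transfers all 64 bits of the GPR, while
; 32-bit floating-point values occupy the high (leftmost) word of the
; 64-bit FPR.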
define float @f1(i32 %a) {
; CHECK-LABEL: f1:
; CHECK: sllg [[REGISTER:%r[0-5]]], %r2, 32
; CHECK: ldgr %f0, [[REGISTER]]
  %res = bitcast i32 %a to float
  ret float %res
}

; Like f1, but create a situation where the shift can be folded with
; surrounding code.
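; Here the right shift of 1 and the move into the high word should combine
; into a single RISBG: rotate left 31 and keep only bits 0-31, with the
; remaining bits zeroed (159 is the end bit 31 with the zero flag 128 added).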
define float @f2(i64 %big) {
; CHECK-LABEL: f2:
; CHECK: risbg [[REGISTER:%r[0-5]]], %r2, 0, 159, 31
; CHECK: ldgr %f0, [[REGISTER]]
  %shift = lshr i64 %big, 1
  %a = trunc i64 %shift to i32
  %res = bitcast i32 %a to float
  ret float %res
}

; Another example of the same thing.
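; This time the right shift of 30 plus the move into the high word is
; equivalent to a rotate left of 2, again with the low word cleared.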
define float @f3(i64 %big) {
; CHECK-LABEL: f3:
; CHECK: risbg [[REGISTER:%r[0-5]]], %r2, 0, 159, 2
; CHECK: ldgr %f0, [[REGISTER]]
  %shift = ashr i64 %big, 30
  %a = trunc i64 %shift to i32
  %res = bitcast i32 %a to float
  ret float %res
}

; Like f1, but the value to transfer is already in the high 32 bits.
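; No shift or rotate is needed; presumably only the low word still has to
; be cleared (NILF with a zero mask) before the LDGR.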
define float @f4(i64 %big) {
; CHECK-LABEL: f4:
; CHECK-NOT: %r2
; CHECK: nilf %r2, 0
; CHECK-NOT: %r2
; CHECK: ldgr %f0, %r2
  %shift = ashr i64 %big, 32
  %a = trunc i64 %shift to i32
  %res = bitcast i32 %a to float
  ret float %res
}

; Test 64-bit moves from GPRs to FPRs.
define double @f5(i64 %a) {
; CHECK-LABEL: f5:
; CHECK: ldgr %f0, %r2
  %res = bitcast i64 %a to double
  ret double %res
}

; Test 128-bit moves from GPRs to FPRs. i128 isn't a legal type,
; so this goes through memory.
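; The value should simply be copied through GPRs with two 64-bit loads and
; two 64-bit stores (lg/stg), rather than any GPR-to-FPR transfers.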
define void @f6(fp128 *%a, i128 *%b) {
; CHECK-LABEL: f6:
; CHECK: lg
; CHECK: lg
; CHECK: stg
; CHECK: stg
; CHECK: br %r14
  %val = load i128 *%b
  %res = bitcast i128 %val to fp128
  store fp128 %res, fp128 *%a
  ret void
}

; Test 32-bit moves from FPRs to GPRs. The high 32 bits of the FPR should
; be moved into the low 32 bits of the GPR.
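; This is the inverse of f1: LGDR copies the full 64-bit FPR into a GPR,
; after which SRLG moves the payload down from the high word.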
define i32 @f7(float %a) {
; CHECK-LABEL: f7:
; CHECK: lgdr [[REGISTER:%r[0-5]]], %f0
; CHECK: srlg %r2, [[REGISTER]], 32
  %res = bitcast float %a to i32
  ret i32 %res
}

; Test 64-bit moves from FPRs to GPRs.
define i64 @f8(double %a) {
; CHECK-LABEL: f8:
; CHECK: lgdr %r2, %f0
  %res = bitcast double %a to i64
  ret i64 %res
}

; Test 128-bit moves from FPRs to GPRs, with the same restriction as f6.
define void @f9(fp128 *%a, i128 *%b) {
; CHECK-LABEL: f9:
; CHECK: ld
; CHECK: ld
; CHECK: std
; CHECK: std
  %val = load fp128 *%a
  %res = bitcast fp128 %val to i128
  store i128 %res, i128 *%b
  ret void
}

; Test cases where the destination of an LGDR needs to be spilled.
; We shouldn't have any integer stack stores or floating-point loads.
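; The idea is that the FPR-to-GPR transfer should be folded into the spill
; code: each double is spilled directly with STD and reloaded straight into
; a GPR with LG, rather than moved with LGDR and spilled with STG.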
define void @f10(double %extra) {
; CHECK-LABEL: f10:
; CHECK: dptr
; CHECK-NOT: stg {{.*}}(%r15)
; CHECK: %loop
; CHECK-NOT: ld {{.*}}(%r15)
; CHECK: %exit
; CHECK: br %r14
entry:
  %double0 = load volatile double *@dptr
  %biased0 = fadd double %double0, %extra
  %int0 = bitcast double %biased0 to i64
  %double1 = load volatile double *@dptr
  %biased1 = fadd double %double1, %extra
  %int1 = bitcast double %biased1 to i64
  %double2 = load volatile double *@dptr
  %biased2 = fadd double %double2, %extra
  %int2 = bitcast double %biased2 to i64
  %double3 = load volatile double *@dptr
  %biased3 = fadd double %double3, %extra
  %int3 = bitcast double %biased3 to i64
  %double4 = load volatile double *@dptr
  %biased4 = fadd double %double4, %extra
  %int4 = bitcast double %biased4 to i64
  %double5 = load volatile double *@dptr
  %biased5 = fadd double %double5, %extra
  %int5 = bitcast double %biased5 to i64
  %double6 = load volatile double *@dptr
  %biased6 = fadd double %double6, %extra
  %int6 = bitcast double %biased6 to i64
  %double7 = load volatile double *@dptr
  %biased7 = fadd double %double7, %extra
  %int7 = bitcast double %biased7 to i64
  %double8 = load volatile double *@dptr
  %biased8 = fadd double %double8, %extra
  %int8 = bitcast double %biased8 to i64
  %double9 = load volatile double *@dptr
  %biased9 = fadd double %double9, %extra
  %int9 = bitcast double %biased9 to i64
  br label %loop

loop:
  %start = call i64 @foo()
  %or0 = or i64 %start, %int0
  %or1 = or i64 %or0, %int1
  %or2 = or i64 %or1, %int2
  %or3 = or i64 %or2, %int3
  %or4 = or i64 %or3, %int4
  %or5 = or i64 %or4, %int5
  %or6 = or i64 %or5, %int6
  %or7 = or i64 %or6, %int7
  %or8 = or i64 %or7, %int8
  %or9 = or i64 %or8, %int9
  store i64 %or9, i64 *@iptr
  %cont = icmp ne i64 %start, 1
  br i1 %cont, label %loop, label %exit

exit:
  ret void
}

; ...likewise LDGR, with the requirements the other way around.
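; Here each masked integer should be spilled directly with STG and reloaded
; straight into an FPR with LD, rather than moved with LDGR and spilled
; with STD.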
define void @f11(i64 %mask) {
; CHECK-LABEL: f11:
; CHECK: iptr
; CHECK-NOT: std {{.*}}(%r15)
; CHECK: %loop
; CHECK-NOT: lg {{.*}}(%r15)
; CHECK: %exit
; CHECK: br %r14
entry:
  %int0 = load volatile i64 *@iptr
  %masked0 = and i64 %int0, %mask
  %double0 = bitcast i64 %masked0 to double
  %int1 = load volatile i64 *@iptr
  %masked1 = and i64 %int1, %mask
  %double1 = bitcast i64 %masked1 to double
  %int2 = load volatile i64 *@iptr
  %masked2 = and i64 %int2, %mask
  %double2 = bitcast i64 %masked2 to double
  %int3 = load volatile i64 *@iptr
  %masked3 = and i64 %int3, %mask
  %double3 = bitcast i64 %masked3 to double
  %int4 = load volatile i64 *@iptr
  %masked4 = and i64 %int4, %mask
  %double4 = bitcast i64 %masked4 to double
  %int5 = load volatile i64 *@iptr
  %masked5 = and i64 %int5, %mask
  %double5 = bitcast i64 %masked5 to double
  %int6 = load volatile i64 *@iptr
  %masked6 = and i64 %int6, %mask
  %double6 = bitcast i64 %masked6 to double
  %int7 = load volatile i64 *@iptr
  %masked7 = and i64 %int7, %mask
  %double7 = bitcast i64 %masked7 to double
  %int8 = load volatile i64 *@iptr
  %masked8 = and i64 %int8, %mask
  %double8 = bitcast i64 %masked8 to double
  %int9 = load volatile i64 *@iptr
  %masked9 = and i64 %int9, %mask
  %double9 = bitcast i64 %masked9 to double
  br label %loop

loop:
  %start = call double @bar()
  %add0 = fadd double %start, %double0
  %add1 = fadd double %add0, %double1
  %add2 = fadd double %add1, %double2
  %add3 = fadd double %add2, %double3
  %add4 = fadd double %add3, %double4
  %add5 = fadd double %add4, %double5
  %add6 = fadd double %add5, %double6
  %add7 = fadd double %add6, %double7
  %add8 = fadd double %add7, %double8
  %add9 = fadd double %add8, %double9
  store double %add9, double *@dptr
  %cont = fcmp one double %start, 1.0
  br i1 %cont, label %loop, label %exit

exit:
  ret void
}

; Test cases where the source of an LDGR needs to be spilled.
; We shouldn't have any floating-point stack stores or integer loads.
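; In this direction the i64 loop values may be spilled normally with STG,
; but each reload in the exit block should be an LD directly into an FPR
; rather than an LG followed by an LDGR.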
define void @f12() {
; CHECK-LABEL: f12:
; CHECK: %loop
; CHECK-NOT: std {{.*}}(%r15)
; CHECK: %exit
; CHECK: foo@PLT
; CHECK-NOT: lg {{.*}}(%r15)
; CHECK: foo@PLT
; CHECK: br %r14
entry:
  br label %loop

loop:
  %int0 = phi i64 [ 0, %entry ], [ %add0, %loop ]
  %int1 = phi i64 [ 0, %entry ], [ %add1, %loop ]
  %int2 = phi i64 [ 0, %entry ], [ %add2, %loop ]
  %int3 = phi i64 [ 0, %entry ], [ %add3, %loop ]
  %int4 = phi i64 [ 0, %entry ], [ %add4, %loop ]
  %int5 = phi i64 [ 0, %entry ], [ %add5, %loop ]
  %int6 = phi i64 [ 0, %entry ], [ %add6, %loop ]
  %int7 = phi i64 [ 0, %entry ], [ %add7, %loop ]
  %int8 = phi i64 [ 0, %entry ], [ %add8, %loop ]
  %int9 = phi i64 [ 0, %entry ], [ %add9, %loop ]

  %bias = call i64 @foo()
  %add0 = add i64 %int0, %bias
  %add1 = add i64 %int1, %bias
  %add2 = add i64 %int2, %bias
  %add3 = add i64 %int3, %bias
  %add4 = add i64 %int4, %bias
  %add5 = add i64 %int5, %bias
  %add6 = add i64 %int6, %bias
  %add7 = add i64 %int7, %bias
  %add8 = add i64 %int8, %bias
  %add9 = add i64 %int9, %bias
  %cont = icmp ne i64 %bias, 1
  br i1 %cont, label %loop, label %exit

exit:
  %unused1 = call i64 @foo()
  %factor = load volatile double *@dptr

  %conv0 = bitcast i64 %add0 to double
  %mul0 = fmul double %conv0, %factor
  store volatile double %mul0, double *@dptr
  %conv1 = bitcast i64 %add1 to double
  %mul1 = fmul double %conv1, %factor
  store volatile double %mul1, double *@dptr
  %conv2 = bitcast i64 %add2 to double
  %mul2 = fmul double %conv2, %factor
  store volatile double %mul2, double *@dptr
  %conv3 = bitcast i64 %add3 to double
  %mul3 = fmul double %conv3, %factor
  store volatile double %mul3, double *@dptr
  %conv4 = bitcast i64 %add4 to double
  %mul4 = fmul double %conv4, %factor
  store volatile double %mul4, double *@dptr
  %conv5 = bitcast i64 %add5 to double
  %mul5 = fmul double %conv5, %factor
  store volatile double %mul5, double *@dptr
  %conv6 = bitcast i64 %add6 to double
  %mul6 = fmul double %conv6, %factor
  store volatile double %mul6, double *@dptr
  %conv7 = bitcast i64 %add7 to double
  %mul7 = fmul double %conv7, %factor
  store volatile double %mul7, double *@dptr
  %conv8 = bitcast i64 %add8 to double
  %mul8 = fmul double %conv8, %factor
  store volatile double %mul8, double *@dptr
  %conv9 = bitcast i64 %add9 to double
  %mul9 = fmul double %conv9, %factor
  store volatile double %mul9, double *@dptr

  %unused2 = call i64 @foo()

  ret void
}

; ...likewise LGDR, with the requirements the other way around.
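; Conversely, the doubles may be spilled with STD, but each reload in the
; exit block should be an LG directly into a GPR rather than an LD followed
; by an LGDR.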
define void @f13() {
; CHECK-LABEL: f13:
; CHECK: %loop
; CHECK-NOT: stg {{.*}}(%r15)
; CHECK: %exit
; CHECK: foo@PLT
; CHECK-NOT: ld {{.*}}(%r15)
; CHECK: foo@PLT
; CHECK: br %r14
entry:
  br label %loop

loop:
  %double0 = phi double [ 1.0, %entry ], [ %mul0, %loop ]
  %double1 = phi double [ 1.0, %entry ], [ %mul1, %loop ]
  %double2 = phi double [ 1.0, %entry ], [ %mul2, %loop ]
  %double3 = phi double [ 1.0, %entry ], [ %mul3, %loop ]
  %double4 = phi double [ 1.0, %entry ], [ %mul4, %loop ]
  %double5 = phi double [ 1.0, %entry ], [ %mul5, %loop ]
  %double6 = phi double [ 1.0, %entry ], [ %mul6, %loop ]
  %double7 = phi double [ 1.0, %entry ], [ %mul7, %loop ]
  %double8 = phi double [ 1.0, %entry ], [ %mul8, %loop ]
  %double9 = phi double [ 1.0, %entry ], [ %mul9, %loop ]

  %factor = call double @bar()
  %mul0 = fmul double %double0, %factor
  %mul1 = fmul double %double1, %factor
  %mul2 = fmul double %double2, %factor
  %mul3 = fmul double %double3, %factor
  %mul4 = fmul double %double4, %factor
  %mul5 = fmul double %double5, %factor
  %mul6 = fmul double %double6, %factor
  %mul7 = fmul double %double7, %factor
  %mul8 = fmul double %double8, %factor
  %mul9 = fmul double %double9, %factor
  %cont = fcmp one double %factor, 1.0
  br i1 %cont, label %loop, label %exit

exit:
  %unused1 = call i64 @foo()
  %bias = load volatile i64 *@iptr

  %conv0 = bitcast double %mul0 to i64
  %add0 = add i64 %conv0, %bias
  store volatile i64 %add0, i64 *@iptr
  %conv1 = bitcast double %mul1 to i64
  %add1 = add i64 %conv1, %bias
  store volatile i64 %add1, i64 *@iptr
  %conv2 = bitcast double %mul2 to i64
  %add2 = add i64 %conv2, %bias
  store volatile i64 %add2, i64 *@iptr
  %conv3 = bitcast double %mul3 to i64
  %add3 = add i64 %conv3, %bias
  store volatile i64 %add3, i64 *@iptr
  %conv4 = bitcast double %mul4 to i64
  %add4 = add i64 %conv4, %bias
  store volatile i64 %add4, i64 *@iptr
  %conv5 = bitcast double %mul5 to i64
  %add5 = add i64 %conv5, %bias
  store volatile i64 %add5, i64 *@iptr
  %conv6 = bitcast double %mul6 to i64
  %add6 = add i64 %conv6, %bias
  store volatile i64 %add6, i64 *@iptr
  %conv7 = bitcast double %mul7 to i64
  %add7 = add i64 %conv7, %bias
  store volatile i64 %add7, i64 *@iptr
  %conv8 = bitcast double %mul8 to i64
  %add8 = add i64 %conv8, %bias
  store volatile i64 %add8, i64 *@iptr
  %conv9 = bitcast double %mul9 to i64
  %add9 = add i64 %conv9, %bias
  store volatile i64 %add9, i64 *@iptr

  %unused2 = call i64 @foo()

  ret void
}