; Test multiplication of two f64s, producing an f64 result.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare double @foo()

; Check register multiplication.
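; MDBR is the register-register (RRE) form of MULTIPLY (long BFP).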
define double @f1(double %f1, double %f2) {
; CHECK: f1:
; CHECK: mdbr %f0, %f2
; CHECK: br %r14
  %res = fmul double %f1, %f2
  ret double %res
}

; Check the low end of the MDB range.
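; MDB is the memory (RXE) form; its 12-bit unsigned displacement covers
; offsets 0 to 4095 bytes from the base register.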
define double @f2(double %f1, double *%ptr) {
; CHECK: f2:
; CHECK: mdb %f0, 0(%r2)
; CHECK: br %r14
  %f2 = load double *%ptr
  %res = fmul double %f1, %f2
  ret double %res
}

; Check the high end of the aligned MDB range.
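; 511 * 8 = 4088 is the largest doubleword-aligned offset that still fits
; in the 12-bit displacement field.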
define double @f3(double %f1, double *%base) {
; CHECK: f3:
; CHECK: mdb %f0, 4088(%r2)
; CHECK: br %r14
  %ptr = getelementptr double *%base, i64 511
  %f2 = load double *%ptr
  %res = fmul double %f1, %f2
  ret double %res
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
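; 512 * 8 = 4096 no longer fits in the displacement field, so the base
; register must be adjusted first.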
define double @f4(double %f1, double *%base) {
; CHECK: f4:
; CHECK: aghi %r2, 4096
; CHECK: mdb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr double *%base, i64 512
  %f2 = load double *%ptr
  %res = fmul double %f1, %f2
  ret double %res
}

; Check negative displacements, which also need separate address logic.
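; The displacement field is unsigned, so an offset of -8 cannot be encoded
; and the base must be adjusted instead.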
define double @f5(double %f1, double *%base) {
; CHECK: f5:
; CHECK: aghi %r2, -8
; CHECK: mdb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr double *%base, i64 -1
  %f2 = load double *%ptr
  %res = fmul double %f1, %f2
  ret double %res
}

; Check that MDB allows indices.
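; The index is scaled by 8 (SLLG by 3); the constant part of the address,
; 100 * 8 = 800, folds into the displacement.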
define double @f6(double %f1, double *%base, i64 %index) {
; CHECK: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: mdb %f0, 800(%r1,%r2)
; CHECK: br %r14
  %ptr1 = getelementptr double *%base, i64 %index
  %ptr2 = getelementptr double *%ptr1, i64 100
  %f2 = load double *%ptr2
  %res = fmul double %f1, %f2
  ret double %res
}

; Check that multiplications of spilled values can use MDB rather than MDBR.
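; More values are live across the call than there are call-saved FPRs, so
; some must be spilled and reloaded; folding the reload into MDB saves a
; separate load. 160(%r15) should be the first spill slot above the
; 160-byte register save area in the s390x ELF ABI.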
define double @f7(double *%ptr0) {
; CHECK: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK: mdb %f0, 160(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr double *%ptr0, i64 2
  %ptr2 = getelementptr double *%ptr0, i64 4
  %ptr3 = getelementptr double *%ptr0, i64 6
  %ptr4 = getelementptr double *%ptr0, i64 8
  %ptr5 = getelementptr double *%ptr0, i64 10
  %ptr6 = getelementptr double *%ptr0, i64 12
  %ptr7 = getelementptr double *%ptr0, i64 14
  %ptr8 = getelementptr double *%ptr0, i64 16
  %ptr9 = getelementptr double *%ptr0, i64 18
  %ptr10 = getelementptr double *%ptr0, i64 20

  %val0 = load double *%ptr0
  %val1 = load double *%ptr1
  %val2 = load double *%ptr2
  %val3 = load double *%ptr3
  %val4 = load double *%ptr4
  %val5 = load double *%ptr5
  %val6 = load double *%ptr6
  %val7 = load double *%ptr7
  %val8 = load double *%ptr8
  %val9 = load double *%ptr9
  %val10 = load double *%ptr10

  %ret = call double @foo()

  %mul0 = fmul double %ret, %val0
  %mul1 = fmul double %mul0, %val1
  %mul2 = fmul double %mul1, %val2
  %mul3 = fmul double %mul2, %val3
  %mul4 = fmul double %mul3, %val4
  %mul5 = fmul double %mul4, %val5
  %mul6 = fmul double %mul5, %val6
  %mul7 = fmul double %mul6, %val7
  %mul8 = fmul double %mul7, %val8
  %mul9 = fmul double %mul8, %val9
  %mul10 = fmul double %mul9, %val10

  ret double %mul10
}