; Test high-part i64->i128 multiplications.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; Check zero-extended multiplication in which only the high part is used.
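; The zext/mul/lshr 64/trunc pattern below is the usual IR form of an unsigned
; 64x64->high-64 multiply.  MLGR should compute the full 128-bit product into
; the even/odd pair %r2:%r3, leaving the high half in %r2, which is already the
; return register.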
define i64 @f1(i64 %dummy, i64 %a, i64 %b) {
; CHECK: f1:
; CHECK-NOT: {{%r[234]}}
; CHECK: mlgr %r2, %r4
; CHECK: br %r14
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check sign-extended multiplication in which only the high part is used.
; This needs a rather convoluted sequence.
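; MLGR only produces an unsigned product, so the signed high part presumably
; has to be reconstructed from the unsigned one using the usual identity
;   mulh_s(a, b) = mulh_u(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)  (mod 2^64),
; which accounts for the extra fix-up arithmetic around the MLGR below.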
define i64 @f2(i64 %dummy, i64 %a, i64 %b) {
; CHECK: f2:
; CHECK: mlgr
; CHECK: agr
; CHECK: agr
; CHECK: br %r14
  %ax = sext i64 %a to i128
  %bx = sext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check zero-extended multiplication in which only part of the high half
; is used.
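; An lshr by 67 is the high half shifted right by a further 67 - 64 = 3 bits,
; so a single SRLG by 3 on the MLGR result should suffice.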
define i64 @f3(i64 %dummy, i64 %a, i64 %b) {
; CHECK: f3:
; CHECK-NOT: {{%r[234]}}
; CHECK: mlgr %r2, %r4
; CHECK: srlg %r2, %r2, 3
; CHECK: br %r14
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 67
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check zero-extended multiplication in which the result is split into
; high and low halves.
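; Both halves should come straight out of the MLGR even/odd pair
; (%r2 = high, %r3 = low), so the OR can use them directly.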
define i64 @f4(i64 %dummy, i64 %a, i64 %b) {
; CHECK: f4:
; CHECK-NOT: {{%r[234]}}
; CHECK: mlgr %r2, %r4
; CHECK: ogr %r2, %r3
; CHECK: br %r14
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  %low = trunc i128 %mulx to i64
  %or = or i64 %high, %low
  ret i64 %or
}

; Check division by a constant, which should use multiplication instead.
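; The expected expansion computes floor(a / 1234) as mulh_u(a, M) >> S for a
; suitable precomputed 64-bit magic constant M and shift S, hence the MLGR and
; SRLG with no divide instruction.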
define i64 @f5(i64 %dummy, i64 %a) {
; CHECK: f5:
; CHECK: mlgr %r2,
; CHECK: srlg %r2, %r2,
; CHECK: br %r14
  %res = udiv i64 %a, 1234
  ret i64 %res
}

; Check MLG with no displacement.
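; MLG is the storage-operand form of MLGR, so the zero-extended multiplication
; should be able to take one operand directly from memory.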
define i64 @f6(i64 %dummy, i64 %a, i64 *%src) {
; CHECK: f6:
; CHECK-NOT: {{%r[234]}}
; CHECK: mlg %r2, 0(%r4)
; CHECK: br %r14
  %b = load i64 *%src
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the high end of the aligned MLG range.
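; 65535 * 8 = 524280, which should still fit in MLG's signed 20-bit
; displacement range of -524288..524287.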
define i64 @f7(i64 %dummy, i64 %a, i64 *%src) {
; CHECK: f7:
; CHECK: mlg %r2, 524280(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64 *%src, i64 65535
  %b = load i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the next doubleword up, which requires separate address logic.
; Other sequences besides this one would be OK.
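; 65536 * 8 = 524288 is just past the maximum displacement of 524287, so the
; base register presumably has to be adjusted first.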
define i64 @f8(i64 %dummy, i64 %a, i64 *%src) {
; CHECK: f8:
; CHECK: agfi %r4, 524288
; CHECK: mlg %r2, 0(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64 *%src, i64 65536
  %b = load i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the high end of the negative aligned MLG range.
define i64 @f9(i64 %dummy, i64 %a, i64 *%src) {
; CHECK: f9:
; CHECK: mlg %r2, -8(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64 *%src, i64 -1
  %b = load i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the low end of the MLG range.
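; -65536 * 8 = -524288 is exactly the minimum signed 20-bit displacement, so it
; should still fold into the MLG.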
define i64 @f10(i64 %dummy, i64 %a, i64 *%src) {
; CHECK: f10:
; CHECK: mlg %r2, -524288(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64 *%src, i64 -65536
  %b = load i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
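; -65537 * 8 = -524296 is one doubleword below the displacement range, so the
; address presumably has to be formed separately again.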
define i64 @f11(i64 *%dest, i64 %a, i64 *%src) {
; CHECK: f11:
; CHECK: agfi %r4, -524296
; CHECK: mlg %r2, 0(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64 *%src, i64 -65537
  %b = load i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check that MLG allows an index.
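; MLG takes a base, an index and a displacement, so %src, %index and the
; in-range offset 524287 should all fold into a single addressing mode.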
define i64 @f12(i64 *%dest, i64 %a, i64 %src, i64 %index) {
; CHECK: f12:
; CHECK: mlg %r2, 524287(%r5,%r4)
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to i64 *
  %b = load i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}