; Test 32-bit addition in which the second operand is variable.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; Check AR (register + register form of 32-bit add).
define i32 @f1(i32 %a, i32 %b) {
; CHECK: f1:
; CHECK: ar %r2, %r3
; CHECK: br %r14
  %add = add i32 %a, %b
  ret i32 %add
}

; Check the low end of the A range (displacement 0).
define i32 @f2(i32 %a, i32 *%src) {
; CHECK: f2:
; CHECK: a %r2, 0(%r3)
; CHECK: br %r14
  %b = load i32 *%src
  %add = add i32 %a, %b
  ret i32 %add
}

; Check the high end of the aligned A range (12-bit unsigned
; displacement: 4092 = 1023 * 4 is the last word-aligned offset).
define i32 @f3(i32 %a, i32 *%src) {
; CHECK: f3:
; CHECK: a %r2, 4092(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 1023
  %b = load i32 *%ptr
  %add = add i32 %a, %b
  ret i32 %add
}

; Check the next word up, which should use AY (20-bit signed
; displacement) instead of A.
define i32 @f4(i32 %a, i32 *%src) {
; CHECK: f4:
; CHECK: ay %r2, 4096(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 1024
  %b = load i32 *%ptr
  %add = add i32 %a, %b
  ret i32 %add
}

; Check the high end of the aligned AY range (524284 = 131071 * 4,
; the last word-aligned offset before the 20-bit displacement limit).
define i32 @f5(i32 %a, i32 *%src) {
; CHECK: f5:
; CHECK: ay %r2, 524284(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 131071
  %b = load i32 *%ptr
  %add = add i32 %a, %b
  ret i32 %add
}

; Check the next word up, which needs separate address logic
; (524288 exceeds the 20-bit signed displacement range).
; Other sequences besides this one would be OK.
define i32 @f6(i32 %a, i32 *%src) {
; CHECK: f6:
; CHECK: agfi %r3, 524288
; CHECK: a %r2, 0(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 131072
  %b = load i32 *%ptr
  %add = add i32 %a, %b
  ret i32 %add
}

; Check the high end of the negative aligned AY range (offset -4).
define i32 @f7(i32 %a, i32 *%src) {
; CHECK: f7:
; CHECK: ay %r2, -4(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 -1
  %b = load i32 *%ptr
  %add = add i32 %a, %b
  ret i32 %add
}

; Check the low end of the AY range (-524288, the most negative
; 20-bit signed displacement).
define i32 @f8(i32 %a, i32 *%src) {
; CHECK: f8:
; CHECK: ay %r2, -524288(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 -131072
  %b = load i32 *%ptr
  %add = add i32 %a, %b
  ret i32 %add
}

; Check the next word down, which needs separate address logic
; (-524292 is below the 20-bit signed displacement range).
; Other sequences besides this one would be OK.
define i32 @f9(i32 %a, i32 *%src) {
; CHECK: f9:
; CHECK: agfi %r3, -524292
; CHECK: a %r2, 0(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32 *%src, i64 -131073
  %b = load i32 *%ptr
  %add = add i32 %a, %b
  ret i32 %add
}

; Check that A allows an index register (either operand order
; of base and index is acceptable).
define i32 @f10(i32 %a, i64 %src, i64 %index) {
; CHECK: f10:
; CHECK: a %r2, 4092({{%r4,%r3|%r3,%r4}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 4092
  %ptr = inttoptr i64 %add2 to i32 *
  %b = load i32 *%ptr
  %add = add i32 %a, %b
  ret i32 %add
}

; Check that AY allows an index register (either operand order
; of base and index is acceptable).
define i32 @f11(i32 %a, i64 %src, i64 %index) {
; CHECK: f11:
; CHECK: ay %r2, 4096({{%r4,%r3|%r3,%r4}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 4096
  %ptr = inttoptr i64 %add2 to i32 *
  %b = load i32 *%ptr
  %add = add i32 %a, %b
  ret i32 %add
}