; Like frame-05.ll, but with i64s rather than i32s. Internally this
; uses a different register class, but the set of saved and restored
; registers should be the same.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; This function should require all GPRs, but no other spill slots. The caller
; allocates room for the GPR save slots, so we shouldn't need to allocate any
; extra space.
;
; Use a different address for the final store, so that we can check that
; %r15 isn't referenced again until after that.
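;
; For reference: under the s390x ELF ABI the CFA is the incoming %r15
; plus 160, and %rN's save slot sits at 8*(N-6)+48 off %r15, so saved
; %r6 lands at 48-160 = CFA-112 and each higher register is 8 bytes
; closer to the CFA, up to %r15 at CFA-40, which is what the
; .cfi_offset lines below check.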
define void @f1(i64 *%ptr) {
; CHECK-LABEL: f1:
; CHECK: stmg %r6, %r15, 48(%r15)
; CHECK-NOT: %r15
; CHECK: .cfi_offset %r6, -112
; CHECK: .cfi_offset %r7, -104
; CHECK: .cfi_offset %r8, -96
; CHECK: .cfi_offset %r9, -88
; CHECK: .cfi_offset %r10, -80
; CHECK: .cfi_offset %r11, -72
; CHECK: .cfi_offset %r12, -64
; CHECK: .cfi_offset %r13, -56
; CHECK: .cfi_offset %r14, -48
; CHECK: .cfi_offset %r15, -40
; ...main function body...
; CHECK-NOT: %r15
; CHECK: stg {{.*}}, 8(%r2)
; CHECK: lmg %r6, %r15, 48(%r15)
; CHECK: br %r14
  %l0 = load volatile i64, i64 *%ptr
  %l1 = load volatile i64, i64 *%ptr
  %l3 = load volatile i64, i64 *%ptr
  %l4 = load volatile i64, i64 *%ptr
  %l5 = load volatile i64, i64 *%ptr
  %l6 = load volatile i64, i64 *%ptr
  %l7 = load volatile i64, i64 *%ptr
  %l8 = load volatile i64, i64 *%ptr
  %l9 = load volatile i64, i64 *%ptr
  %l10 = load volatile i64, i64 *%ptr
  %l11 = load volatile i64, i64 *%ptr
  %l12 = load volatile i64, i64 *%ptr
  %l13 = load volatile i64, i64 *%ptr
  %l14 = load volatile i64, i64 *%ptr
  %add0 = add i64 %l0, %l0
  %add1 = add i64 %l1, %add0
  %add3 = add i64 %l3, %add1
  %add4 = add i64 %l4, %add3
  %add5 = add i64 %l5, %add4
  %add6 = add i64 %l6, %add5
  %add7 = add i64 %l7, %add6
  %add8 = add i64 %l8, %add7
  %add9 = add i64 %l9, %add8
  %add10 = add i64 %l10, %add9
  %add11 = add i64 %l11, %add10
  %add12 = add i64 %l12, %add11
  %add13 = add i64 %l13, %add12
  %add14 = add i64 %l14, %add13
  store volatile i64 %add0, i64 *%ptr
  store volatile i64 %add1, i64 *%ptr
  store volatile i64 %add3, i64 *%ptr
  store volatile i64 %add4, i64 *%ptr
  store volatile i64 %add5, i64 *%ptr
  store volatile i64 %add6, i64 *%ptr
  store volatile i64 %add7, i64 *%ptr
  store volatile i64 %add8, i64 *%ptr
  store volatile i64 %add9, i64 *%ptr
  store volatile i64 %add10, i64 *%ptr
  store volatile i64 %add11, i64 *%ptr
  store volatile i64 %add12, i64 *%ptr
  store volatile i64 %add13, i64 *%ptr
  %final = getelementptr i64, i64 *%ptr, i64 1
  store volatile i64 %add14, i64 *%final
  ret void
}

; Like f1, but requires one fewer GPR. We allocate the call-saved GPRs
; from %r14 down, so that the STMG/LMG sequences aren't any longer than
; they need to be.
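;
; Dropping %r6 moves the first saved register up to %r7, so the STMG
; starts one slot later, at 56(%r15), and the %r6 CFI offset disappears.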
define void @f2(i64 *%ptr) {
; CHECK-LABEL: f2:
; CHECK: stmg %r7, %r15, 56(%r15)
; CHECK-NOT: %r15
; CHECK: .cfi_offset %r7, -104
; CHECK: .cfi_offset %r8, -96
; CHECK: .cfi_offset %r9, -88
; CHECK: .cfi_offset %r10, -80
; CHECK: .cfi_offset %r11, -72
; CHECK: .cfi_offset %r12, -64
; CHECK: .cfi_offset %r13, -56
; CHECK: .cfi_offset %r14, -48
; CHECK: .cfi_offset %r15, -40
; ...main function body...
; CHECK-NOT: %r15
; CHECK-NOT: %r6
; CHECK: stg {{.*}}, 8(%r2)
; CHECK: lmg %r7, %r15, 56(%r15)
; CHECK: br %r14
  %l0 = load volatile i64, i64 *%ptr
  %l1 = load volatile i64, i64 *%ptr
  %l3 = load volatile i64, i64 *%ptr
  %l4 = load volatile i64, i64 *%ptr
  %l5 = load volatile i64, i64 *%ptr
  %l7 = load volatile i64, i64 *%ptr
  %l8 = load volatile i64, i64 *%ptr
  %l9 = load volatile i64, i64 *%ptr
  %l10 = load volatile i64, i64 *%ptr
  %l11 = load volatile i64, i64 *%ptr
  %l12 = load volatile i64, i64 *%ptr
  %l13 = load volatile i64, i64 *%ptr
  %l14 = load volatile i64, i64 *%ptr
  %add0 = add i64 %l0, %l0
  %add1 = add i64 %l1, %add0
  %add3 = add i64 %l3, %add1
  %add4 = add i64 %l4, %add3
  %add5 = add i64 %l5, %add4
  %add7 = add i64 %l7, %add5
  %add8 = add i64 %l8, %add7
  %add9 = add i64 %l9, %add8
  %add10 = add i64 %l10, %add9
  %add11 = add i64 %l11, %add10
  %add12 = add i64 %l12, %add11
  %add13 = add i64 %l13, %add12
  %add14 = add i64 %l14, %add13
  store volatile i64 %add0, i64 *%ptr
  store volatile i64 %add1, i64 *%ptr
  store volatile i64 %add3, i64 *%ptr
  store volatile i64 %add4, i64 *%ptr
  store volatile i64 %add5, i64 *%ptr
  store volatile i64 %add7, i64 *%ptr
  store volatile i64 %add8, i64 *%ptr
  store volatile i64 %add9, i64 *%ptr
  store volatile i64 %add10, i64 *%ptr
  store volatile i64 %add11, i64 *%ptr
  store volatile i64 %add12, i64 *%ptr
  store volatile i64 %add13, i64 *%ptr
  %final = getelementptr i64, i64 *%ptr, i64 1
  store volatile i64 %add14, i64 *%final
  ret void
}

; Like f1, but only needs one call-saved GPR, which ought to be %r14.
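;
; Since only %r14 and %r15 are saved, the STMG starts at 112(%r15),
; which is %r14's slot in the caller-allocated save area, so the
; function needs no frame space of its own.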
define void @f3(i64 *%ptr) {
; CHECK-LABEL: f3:
; CHECK: stmg %r14, %r15, 112(%r15)
; CHECK-NOT: %r15
; CHECK: .cfi_offset %r14, -48
; CHECK: .cfi_offset %r15, -40
; ...main function body...
; CHECK-NOT: %r15
; CHECK-NOT: %r6
; CHECK-NOT: %r7
; CHECK-NOT: %r8
; CHECK-NOT: %r9
; CHECK-NOT: %r10
; CHECK-NOT: %r11
; CHECK-NOT: %r12
; CHECK-NOT: %r13
; CHECK: stg {{.*}}, 8(%r2)
; CHECK: lmg %r14, %r15, 112(%r15)
; CHECK: br %r14
  %l0 = load volatile i64, i64 *%ptr
  %l1 = load volatile i64, i64 *%ptr
  %l3 = load volatile i64, i64 *%ptr
  %l4 = load volatile i64, i64 *%ptr
  %l5 = load volatile i64, i64 *%ptr
  %l14 = load volatile i64, i64 *%ptr
  %add0 = add i64 %l0, %l0
  %add1 = add i64 %l1, %add0
  %add3 = add i64 %l3, %add1
  %add4 = add i64 %l4, %add3
  %add5 = add i64 %l5, %add4
  %add14 = add i64 %l14, %add5
  store volatile i64 %add0, i64 *%ptr
  store volatile i64 %add1, i64 *%ptr
  store volatile i64 %add3, i64 *%ptr
  store volatile i64 %add4, i64 *%ptr
  store volatile i64 %add5, i64 *%ptr
  %final = getelementptr i64, i64 *%ptr, i64 1
  store volatile i64 %add14, i64 *%final
  ret void
}

; This function should use all call-clobbered GPRs but no call-saved ones.
; It shouldn't need to touch the stack at all.
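;
; The five loaded values plus the %r2 pointer fit exactly in the
; call-clobbered GPRs, which is why no save/restore sequence should
; appear.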
define void @f4(i64 *%ptr) {
; CHECK-LABEL: f4:
; CHECK-NOT: %r15
; CHECK-NOT: %r6
; CHECK-NOT: %r7
; CHECK-NOT: %r8
; CHECK-NOT: %r9
; CHECK-NOT: %r10
; CHECK-NOT: %r11
; CHECK-NOT: %r12
; CHECK-NOT: %r13
; CHECK: br %r14
  %l0 = load volatile i64, i64 *%ptr
  %l1 = load volatile i64, i64 *%ptr
  %l3 = load volatile i64, i64 *%ptr
  %l4 = load volatile i64, i64 *%ptr
  %l5 = load volatile i64, i64 *%ptr
  %add0 = add i64 %l0, %l0
  %add1 = add i64 %l1, %add0
  %add3 = add i64 %l3, %add1
  %add4 = add i64 %l4, %add3
  %add5 = add i64 %l5, %add4
  store volatile i64 %add0, i64 *%ptr
  store volatile i64 %add1, i64 *%ptr
  store volatile i64 %add3, i64 *%ptr
  store volatile i64 %add4, i64 *%ptr
  %final = getelementptr i64, i64 *%ptr, i64 1
  store volatile i64 %add5, i64 *%final
  ret void
}