; Test spilling of GPRs.  The tests here assume z10 register pressure,
; without the high words being available.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s

; We need to allocate a 4-byte spill slot, rounded to 8 bytes.  The frame
; size should be exactly 160 + 8 = 168.
define void @f1(i32 *%ptr) {
; CHECK-LABEL: f1:
; CHECK: stmg %r6, %r15, 48(%r15)
; CHECK: aghi %r15, -168
; CHECK-NOT: 160(%r15)
; CHECK: st [[REGISTER:%r[0-9]+]], 164(%r15)
; CHECK-NOT: 160(%r15)
; CHECK: l [[REGISTER]], 164(%r15)
; CHECK-NOT: 160(%r15)
; CHECK: lmg %r6, %r15, 216(%r15)
; CHECK: br %r14
; The volatile loads below produce 14 values that must all stay live
; until the stores at the end, exceeding the available GPRs on z10 (no
; high-word registers), so exactly one value is spilled.  The spill is a
; 4-byte store/reload at offset 164 -- the upper half of the single
; 8-byte slot that starts at 160 -- which is what the directives above
; verify.  Note %l2 is deliberately skipped; presumably because the
; corresponding register holds the incoming %ptr argument -- TODO confirm.
  %l0 = load volatile i32 *%ptr
  %l1 = load volatile i32 *%ptr
  %l3 = load volatile i32 *%ptr
  %l4 = load volatile i32 *%ptr
  %l5 = load volatile i32 *%ptr
  %l6 = load volatile i32 *%ptr
  %l7 = load volatile i32 *%ptr
  %l8 = load volatile i32 *%ptr
  %l9 = load volatile i32 *%ptr
  %l10 = load volatile i32 *%ptr
  %l11 = load volatile i32 *%ptr
  %l12 = load volatile i32 *%ptr
  %l13 = load volatile i32 *%ptr
  %l14 = load volatile i32 *%ptr
  %lx = load volatile i32 *%ptr
; Store the values back in reverse order so every one of them is live
; across the whole sequence; volatility prevents any load/store from
; being combined or removed.
  store volatile i32 %lx, i32 *%ptr
  store volatile i32 %l14, i32 *%ptr
  store volatile i32 %l13, i32 *%ptr
  store volatile i32 %l12, i32 *%ptr
  store volatile i32 %l11, i32 *%ptr
  store volatile i32 %l10, i32 *%ptr
  store volatile i32 %l9, i32 *%ptr
  store volatile i32 %l8, i32 *%ptr
  store volatile i32 %l7, i32 *%ptr
  store volatile i32 %l6, i32 *%ptr
  store volatile i32 %l5, i32 *%ptr
  store volatile i32 %l4, i32 *%ptr
  store volatile i32 %l3, i32 *%ptr
  store volatile i32 %l1, i32 *%ptr
  store volatile i32 %l0, i32 *%ptr
  ret void
}

; Same for i64, except that the full 8-byte spill slot is used.
define void @f2(i64 *%ptr) {
; CHECK-LABEL: f2:
; CHECK: stmg %r6, %r15, 48(%r15)
; CHECK: aghi %r15, -168
; CHECK: stg [[REGISTER:%r[0-9]+]], 160(%r15)
; CHECK: lg [[REGISTER]], 160(%r15)
; CHECK: lmg %r6, %r15, 216(%r15)
; CHECK: br %r14
; Same register-pressure pattern as @f1, but with i64 values: the spill
; now uses stg/lg and occupies the full 8-byte slot at offset 160, so no
; CHECK-NOT directives on 160(%r15) are needed here.  %l2 is again
; skipped, matching @f1.
  %l0 = load volatile i64 *%ptr
  %l1 = load volatile i64 *%ptr
  %l3 = load volatile i64 *%ptr
  %l4 = load volatile i64 *%ptr
  %l5 = load volatile i64 *%ptr
  %l6 = load volatile i64 *%ptr
  %l7 = load volatile i64 *%ptr
  %l8 = load volatile i64 *%ptr
  %l9 = load volatile i64 *%ptr
  %l10 = load volatile i64 *%ptr
  %l11 = load volatile i64 *%ptr
  %l12 = load volatile i64 *%ptr
  %l13 = load volatile i64 *%ptr
  %l14 = load volatile i64 *%ptr
  %lx = load volatile i64 *%ptr
; Reverse-order stores keep all 14 values live simultaneously.
  store volatile i64 %lx, i64 *%ptr
  store volatile i64 %l14, i64 *%ptr
  store volatile i64 %l13, i64 *%ptr
  store volatile i64 %l12, i64 *%ptr
  store volatile i64 %l11, i64 *%ptr
  store volatile i64 %l10, i64 *%ptr
  store volatile i64 %l9, i64 *%ptr
  store volatile i64 %l8, i64 *%ptr
  store volatile i64 %l7, i64 *%ptr
  store volatile i64 %l6, i64 *%ptr
  store volatile i64 %l5, i64 *%ptr
  store volatile i64 %l4, i64 *%ptr
  store volatile i64 %l3, i64 *%ptr
  store volatile i64 %l1, i64 *%ptr
  store volatile i64 %l0, i64 *%ptr
  ret void
}