; RUN: llc < %s -march=sparcv9 | FileCheck %s
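; Tests basic 64-bit code generation on SPARC v9: register moves, shifts,
; immediate materialization, ALU operations, and integer loads and stores.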

; CHECK: ret2:
; CHECK: or %g0, %i1, %i0
define i64 @ret2(i64 %a, i64 %b) {
  ret i64 %b
}
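; SPARC has no dedicated register-to-register move instruction; the canonical
; move, as checked above, is an OR with the zero register %g0.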

; CHECK: shl_imm
; CHECK: sllx %i0, 7, %i0
define i64 @shl_imm(i64 %a) {
  %x = shl i64 %a, 7
  ret i64 %x
}

; CHECK: sra_reg
; CHECK: srax %i0, %i1, %i0
define i64 @sra_reg(i64 %a, i64 %b) {
  %x = ashr i64 %a, %b
  ret i64 %x
}
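; sllx and srax are the 64-bit shift forms added in SPARC v9; the plain
; sll/sra variants have 32-bit semantics.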

; Immediate materialization. Many of these patterns could actually be merged
; into the restore instruction:
;
;   restore %g0, %g0, %o0
;
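; As a rough sketch of the patterns exercised below (assuming standard SPARC
; v9 semantics): a signed 13-bit immediate (-4096..4095) fits directly into
; an instruction's simm13 field, sethi places a 22-bit immediate into bits
; 31:10 of the destination (zeroing the low 10 bits), and a following or/xor
; fills in the remaining low bits:
;
;   or    %g0, simm13, %i0     ! -4096 <= imm <= 4095
;   sethi imm22, %i0           ! materializes imm22 << 10
;   or    %i0, low10, %i0      ! remaining low 10 bits
;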
; CHECK: ret_imm0
; CHECK: or %g0, %g0, %i0
define i64 @ret_imm0() {
  ret i64 0
}

; CHECK: ret_simm13
; CHECK: or %g0, -4096, %i0
define i64 @ret_simm13() {
  ret i64 -4096
}

; CHECK: ret_sethi
; CHECK: sethi 4, %i0
; CHECK-NOT: or
; CHECK: restore
define i64 @ret_sethi() {
  ret i64 4096
}
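; 4096 has no bits set below bit 10, so a single sethi suffices; the
; CHECK-NOT line above verifies that no redundant or is emitted.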

; CHECK: ret_sethi_or
; CHECK: sethi 4, [[R:%[goli][0-7]]]
; CHECK: or [[R]], 1, %i0
define i64 @ret_sethi_or() {
  ret i64 4097
}

; CHECK: ret_nimm33
; CHECK: sethi 4, [[R:%[goli][0-7]]]
; CHECK: xor [[R]], -4, %i0
define i64 @ret_nimm33() {
  ret i64 -4100
}
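; Negative constants use sethi plus xor with a negative simm13: here
; 4096 ^ -4 = -4100, since the xor also flips all the high bits.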

; CHECK: ret_bigimm
; CHECK: sethi
; CHECK: sethi
define i64 @ret_bigimm() {
  ret i64 6800754272627607872
}
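; A general 64-bit constant needs both halves materialized, typically
; sethi/or for each 32-bit half combined with sllx and or, hence the two
; sethi instructions checked above.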

; CHECK: reg_reg_alu
; CHECK: add %i0, %i1, [[R0:%[goli][0-7]]]
; CHECK: sub [[R0]], %i2, [[R1:%[goli][0-7]]]
; CHECK: andn [[R1]], %i0, %i0
define i64 @reg_reg_alu(i64 %x, i64 %y, i64 %z) {
  %a = add i64 %x, %y
  %b = sub i64 %a, %z
  %c = xor i64 %x, -1
  %d = and i64 %b, %c
  ret i64 %d
}
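; The xor with -1 feeding the and is folded into a single andn (and-not)
; instruction, so no separate not is emitted.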

; CHECK: reg_imm_alu
; CHECK: add %i0, -5, [[R0:%[goli][0-7]]]
; CHECK: xor [[R0]], 2, %i0
define i64 @reg_imm_alu(i64 %x, i64 %y, i64 %z) {
  %a = add i64 %x, -5
  %b = xor i64 %a, 2
  ret i64 %b
}

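; In 64-bit mode, ldx is the full 64-bit load, ld zero-extends a 32-bit
; word, and ldsw/ldsh sign-extend 32-bit and 16-bit loads respectively.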
; CHECK: loads
; CHECK: ldx [%i0]
; CHECK: stx %
; CHECK: ld [%i1]
; CHECK: st %
; CHECK: ldsw [%i2]
; CHECK: stx %
; CHECK: ldsh [%i3]
; CHECK: sth %
define i64 @loads(i64* %p, i32* %q, i32* %r, i16* %s) {
  %a = load i64* %p
  %ai = add i64 1, %a
  store i64 %ai, i64* %p
  %b = load i32* %q
  %b2 = zext i32 %b to i64
  %bi = trunc i64 %ai to i32
  store i32 %bi, i32* %q
  %c = load i32* %r
  %c2 = sext i32 %c to i64
  store i64 %ai, i64* %p
  %d = load i16* %s
  %d2 = sext i16 %d to i64
  %di = trunc i64 %ai to i16
  store i16 %di, i16* %s

  %x1 = add i64 %a, %b2
  %x2 = add i64 %c2, %d2
  %x3 = add i64 %x1, %x2
  ret i64 %x3
}

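; getelementptr indices are scaled by element size, so the byte offsets in
; the addresses checked below are 8*1 and 8*2 for i64, 4*-2 for i32,
; 2*20 for i16, and 1*-20 for i8.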
; CHECK: stores
; CHECK: ldx [%i0+8], [[R:%[goli][0-7]]]
; CHECK: stx [[R]], [%i0+16]
; CHECK: st [[R]], [%i1+-8]
; CHECK: sth [[R]], [%i2+40]
; CHECK: stb [[R]], [%i3+-20]
define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
  %p1 = getelementptr i64* %p, i64 1
  %p2 = getelementptr i64* %p, i64 2
  %pv = load i64* %p1
  store i64 %pv, i64* %p2

  %q2 = getelementptr i32* %q, i32 -2
  %qv = trunc i64 %pv to i32
  store i32 %qv, i32* %q2

  %r2 = getelementptr i16* %r, i16 20
  %rv = trunc i64 %pv to i16
  store i16 %rv, i16* %r2

  %s2 = getelementptr i8* %s, i8 -20
  %sv = trunc i64 %pv to i8
  store i8 %sv, i8* %s2

  ret void
}