; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s

; First, a simple example from Clang. The registers could plausibly be
; different, but probably won't be.

%struct.foo = type { i8, [2 x i8], i8 }

define [1 x i64] @from_clang([1 x i64] %f.coerce, i32 %n) nounwind readnone {
; CHECK: from_clang:
; CHECK: bfi w0, w1, #3, #4
; CHECK-NEXT: ret

entry:
  %f.coerce.fca.0.extract = extractvalue [1 x i64] %f.coerce, 0
  %tmp.sroa.0.0.extract.trunc = trunc i64 %f.coerce.fca.0.extract to i32
  %bf.value = shl i32 %n, 3
  %0 = and i32 %bf.value, 120
  %f.sroa.0.0.insert.ext.masked = and i32 %tmp.sroa.0.0.extract.trunc, 135
  %1 = or i32 %f.sroa.0.0.insert.ext.masked, %0
  %f.sroa.0.0.extract.trunc = zext i32 %1 to i64
  %tmp1.sroa.1.1.insert.insert = and i64 %f.coerce.fca.0.extract, 4294967040
  %tmp1.sroa.0.0.insert.insert = or i64 %f.sroa.0.0.extract.trunc, %tmp1.sroa.1.1.insert.insert
  %.fca.0.insert = insertvalue [1 x i64] undef, i64 %tmp1.sroa.0.0.insert.insert, 0
  ret [1 x i64] %.fca.0.insert
}
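
; One plausible C source for @from_clang (an assumption for illustration;
; the generating source isn't recorded in this test):
;
;   struct foo {
;     char a : 3;   /* bits 0-2, kept: part of the 0x87 mask  */
;     char b : 4;   /* bits 3-6, replaced by n: bfi #3, #4    */
;     char c : 1;   /* bit 7, kept: the rest of the 0x87 mask */
;     char pad[2];
;     char d;
;   };
;
;   struct foo from_clang(struct foo f, int n) {
;     f.b = n;
;     return f;
;   }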

define void @test_whole32(i32* %existing, i32* %new) {
; CHECK: test_whole32:
; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #26, #5

  %oldval = load volatile i32* %existing
  %oldval_keep = and i32 %oldval, 2214592511 ; = 0x83ffffff

  %newval = load volatile i32* %new
  %newval_shifted = shl i32 %newval, 26
  %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing

  ret void
}
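
; A hedged C equivalent of the IR above (illustrative only; this is C, where
; "new" is not a keyword):
;
;   void test_whole32(volatile unsigned *existing, volatile unsigned *new) {
;     *existing = (*existing & 0x83ffffffu) | ((*new << 26) & 0x7c000000u);
;   }
;
; The two masks partition the word (0x83ffffff | 0x7c000000 = 0xffffffff),
; so the keep/shift/mask/or sequence folds into a single bfi #26, #5.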

define void @test_whole64(i64* %existing, i64* %new) {
; CHECK: test_whole64:
; CHECK: bfi {{x[0-9]+}}, {{x[0-9]+}}, #26, #14
; CHECK-NOT: and
; CHECK: ret

  %oldval = load volatile i64* %existing
  %oldval_keep = and i64 %oldval, 18446742974265032703 ; = 0xffffff0003ffffff

  %newval = load volatile i64* %new
  %newval_shifted = shl i64 %newval, 26
  %newval_masked = and i64 %newval_shifted, 1099444518912 ; = 0xfffc000000

  %combined = or i64 %oldval_keep, %newval_masked
  store volatile i64 %combined, i64* %existing

  ret void
}
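
; Width check for the 64-bit case above: the inserted mask 0xfffc000000
; covers bits 26-39, i.e. lsb 26 and width 14, and the kept mask
; 0xffffff0003ffffff is its exact complement, which is why no leftover and
; should survive (the CHECK-NOT above).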

define void @test_whole32_from64(i64* %existing, i64* %new) {
; CHECK: test_whole32_from64:
; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #{{0|16}}, #16
; CHECK-NOT: and
; CHECK: ret

  %oldval = load volatile i64* %existing
  %oldval_keep = and i64 %oldval, 4294901760 ; = 0xffff0000

  %newval = load volatile i64* %new
  %newval_masked = and i64 %newval, 65535 ; = 0xffff

  %combined = or i64 %oldval_keep, %newval_masked
  store volatile i64 %combined, i64* %existing

  ret void
}
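
; A note on the #{{0|16}} alternation above: the kept and inserted fields
; are the two halves of a 32-bit value, so (presumably) the DAG is free to
; insert the new low half into the old value (lsb 0) or the old high half
; into the new value (lsb 16); either way the stored result is the same.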

define void @test_32bit_masked(i32 *%existing, i32 *%new) {
; CHECK: test_32bit_masked:
; CHECK: bfi [[INSERT:w[0-9]+]], {{w[0-9]+}}, #3, #4
; CHECK: and {{w[0-9]+}}, [[INSERT]], #0xff

  %oldval = load volatile i32* %existing
  %oldval_keep = and i32 %oldval, 135 ; = 0x87

  %newval = load volatile i32* %new
  %newval_shifted = shl i32 %newval, 3
  %newval_masked = and i32 %newval_shifted, 120 ; = 0x78

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing

  ret void
}
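
; Mask arithmetic for the trailing and above: kept bits 0x87 plus inserted
; bits 0x78 combine to 0xff, a contiguous run of ones and hence a valid
; AArch64 logical immediate, so the final masking remains a single
; and-immediate after the bfi.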

define void @test_64bit_masked(i64 *%existing, i64 *%new) {
; CHECK: test_64bit_masked:
; CHECK: bfi [[INSERT:x[0-9]+]], {{x[0-9]+}}, #40, #8
; CHECK: and {{x[0-9]+}}, [[INSERT]], #0xffff00000000

  %oldval = load volatile i64* %existing
  %oldval_keep = and i64 %oldval, 1095216660480 ; = 0xff_0000_0000

  %newval = load volatile i64* %new
  %newval_shifted = shl i64 %newval, 40
  %newval_masked = and i64 %newval_shifted, 280375465082880 ; = 0xff00_0000_0000

  %combined = or i64 %newval_masked, %oldval_keep
  store volatile i64 %combined, i64* %existing

  ret void
}
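
; The same arithmetic at 64 bits: 0xff_0000_0000 | 0xff00_0000_0000 =
; 0xffff_0000_0000, a contiguous 16-bit run starting at bit 32, hence the
; encodable and #0xffff00000000 after the bfi #40, #8.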

; The mask is too complicated for a literal ANDwwi, so make sure other
; avenues are tried.
define void @test_32bit_complexmask(i32 *%existing, i32 *%new) {
; CHECK: test_32bit_complexmask:
; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #3, #4
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}

  %oldval = load volatile i32* %existing
  %oldval_keep = and i32 %oldval, 647 ; = 0x287

  %newval = load volatile i32* %new
  %newval_shifted = shl i32 %newval, 3
  %newval_masked = and i32 %newval_shifted, 120 ; = 0x78

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing

  ret void
}
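
; Why a register-register and is needed above: the combined mask is
; 0x287 | 0x78 = 0x2ff = 0b10_1111_1111, which is not a contiguous run of
; ones (bit 8 is clear) and so cannot be encoded as a logical immediate;
; the backend must materialize the constant instead.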

; Neither mask is a contiguous set of 1s, so BFI can't be used.
define void @test_32bit_badmask(i32 *%existing, i32 *%new) {
; CHECK: test_32bit_badmask:
; CHECK-NOT: bfi
; CHECK: ret

  %oldval = load volatile i32* %existing
  %oldval_keep = and i32 %oldval, 135 ; = 0x87

  %newval = load volatile i32* %new
  %newval_shifted = shl i32 %newval, 3
  %newval_masked = and i32 %newval_shifted, 632 ; = 0x278

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing

  ret void
}
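
; In binary the inserted mask is 0x278 = 0b10_0111_1000, i.e. bits 3-6 plus
; bit 9. A bfi describes its field as a single lsb/width pair, so no bfi can
; express this insertion.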

; Ditto.
define void @test_64bit_badmask(i64 *%existing, i64 *%new) {
; CHECK: test_64bit_badmask:
; CHECK-NOT: bfi
; CHECK: ret

  %oldval = load volatile i64* %existing
  %oldval_keep = and i64 %oldval, 135 ; = 0x87

  %newval = load volatile i64* %new
  %newval_shifted = shl i64 %newval, 3
  %newval_masked = and i64 %newval_shifted, 664 ; = 0x298

  %combined = or i64 %oldval_keep, %newval_masked
  store volatile i64 %combined, i64* %existing

  ret void
}

; Bitfield insert where there's a leftover shr needed at the beginning
; (e.g. the result of str.bf1 = str.bf2).
define void @test_32bit_with_shr(i32* %existing, i32* %new) {
; CHECK: test_32bit_with_shr:

  %oldval = load volatile i32* %existing
  %oldval_keep = and i32 %oldval, 2214592511 ; = 0x83ffffff

  %newval = load i32* %new
  %newval_shifted = shl i32 %newval, 12
  %newval_masked = and i32 %newval_shifted, 2080374784 ; = 0x7c000000

  %combined = or i32 %oldval_keep, %newval_masked
  store volatile i32 %combined, i32* %existing
; CHECK: lsr [[BIT:w[0-9]+]], {{w[0-9]+}}, #14
; CHECK: bfi {{w[0-9]+}}, [[BIT]], #26, #5

  ret void
}
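
; Shift arithmetic for the lsr/bfi pair above: after shl #12 and the mask
; 0x7c000000 (bits 26-30), the surviving bits come from bits 14-18 of
; %newval. bfi takes its field from bit 0 of the source register, so the
; source must first be shifted right by 26 - 12 = 14.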