; RUN: llc -verify-machineinstrs < %s -march=aarch64 -O0 | FileCheck %s

@var1_32 = global i32 0
@var2_32 = global i32 0

@var1_64 = global i64 0
@var2_64 = global i64 0

define void @logical_32bit() {
; CHECK: logical_32bit:
  %val1 = load i32* @var1_32
  %val2 = load i32* @var2_32

  ; First check basic and/bic/or/orn/eor/eon patterns with no shift
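  ; (bic, orn and eon invert their second operand, i.e. Rd = Rn OP NOT(Rm),
  ; so each xor with -1 below should be folded into the instruction itself)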
  %neg_val2 = xor i32 -1, %val2

  %and_noshift = and i32 %val1, %val2
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %and_noshift, i32* @var1_32
  %bic_noshift = and i32 %neg_val2, %val1
; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %bic_noshift, i32* @var1_32

  %or_noshift = or i32 %val1, %val2
; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %or_noshift, i32* @var1_32
  %orn_noshift = or i32 %neg_val2, %val1
; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %orn_noshift, i32* @var1_32

  %xor_noshift = xor i32 %val1, %val2
; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %xor_noshift, i32* @var1_32
  %xorn_noshift = xor i32 %neg_val2, %val1
; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
  store volatile i32 %xorn_noshift, i32* @var1_32

  ; Check the maximum shift on each
  %operand_lsl31 = shl i32 %val2, 31
  %neg_operand_lsl31 = xor i32 -1, %operand_lsl31
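  ; (31 is the largest shift amount encodable in the 32-bit shifted-register forms)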

  %and_lsl31 = and i32 %val1, %operand_lsl31
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %and_lsl31, i32* @var1_32
  %bic_lsl31 = and i32 %val1, %neg_operand_lsl31
; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %bic_lsl31, i32* @var1_32

  %or_lsl31 = or i32 %val1, %operand_lsl31
; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %or_lsl31, i32* @var1_32
  %orn_lsl31 = or i32 %val1, %neg_operand_lsl31
; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %orn_lsl31, i32* @var1_32

  %xor_lsl31 = xor i32 %val1, %operand_lsl31
; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %xor_lsl31, i32* @var1_32
  %xorn_lsl31 = xor i32 %val1, %neg_operand_lsl31
; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
  store volatile i32 %xorn_lsl31, i32* @var1_32

  ; Check other shifts on a subset
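  ; (the shift type is an orthogonal two-bit field in the shifted-register
  ; encoding, so a couple of representative operations per type should suffice)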
  %operand_asr10 = ashr i32 %val2, 10
  %neg_operand_asr10 = xor i32 -1, %operand_asr10

  %bic_asr10 = and i32 %val1, %neg_operand_asr10
; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #10
  store volatile i32 %bic_asr10, i32* @var1_32
  %xor_asr10 = xor i32 %val1, %operand_asr10
; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #10
  store volatile i32 %xor_asr10, i32* @var1_32

  %operand_lsr1 = lshr i32 %val2, 1
  %neg_operand_lsr1 = xor i32 -1, %operand_lsr1

  %orn_lsr1 = or i32 %val1, %neg_operand_lsr1
; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #1
  store volatile i32 %orn_lsr1, i32* @var1_32
  %xor_lsr1 = xor i32 %val1, %operand_lsr1
; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #1
  store volatile i32 %xor_lsr1, i32* @var1_32

  %operand_ror20_big = shl i32 %val2, 12
  %operand_ror20_small = lshr i32 %val2, 20
  %operand_ror20 = or i32 %operand_ror20_big, %operand_ror20_small
  %neg_operand_ror20 = xor i32 -1, %operand_ror20
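  ; (12 + 20 == 32, so the shl/lshr/or above form a rotate right by 20, which
  ; DAGCombiner should turn into a ROTR that folds as ror #20)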

  %xorn_ror20 = xor i32 %val1, %neg_operand_ror20
; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, ror #20
  store volatile i32 %xorn_ror20, i32* @var1_32
  %and_ror20 = and i32 %val1, %operand_ror20
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, ror #20
  store volatile i32 %and_ror20, i32* @var1_32

  ret void
}

define void @logical_64bit() {
; CHECK: logical_64bit:
  %val1 = load i64* @var1_64
  %val2 = load i64* @var2_64

  ; First check basic and/bic/or/orn/eor/eon patterns with no shift
  %neg_val2 = xor i64 -1, %val2

  %and_noshift = and i64 %val1, %val2
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %and_noshift, i64* @var1_64
  %bic_noshift = and i64 %neg_val2, %val1
; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %bic_noshift, i64* @var1_64

  %or_noshift = or i64 %val1, %val2
; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %or_noshift, i64* @var1_64
  %orn_noshift = or i64 %neg_val2, %val1
; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %orn_noshift, i64* @var1_64

  %xor_noshift = xor i64 %val1, %val2
; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %xor_noshift, i64* @var1_64
  %xorn_noshift = xor i64 %neg_val2, %val1
; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
  store volatile i64 %xorn_noshift, i64* @var1_64

  ; Check the maximum shift on each
  %operand_lsl63 = shl i64 %val2, 63
  %neg_operand_lsl63 = xor i64 -1, %operand_lsl63
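  ; (63 is the largest shift amount encodable in the 64-bit shifted-register forms)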

  %and_lsl63 = and i64 %val1, %operand_lsl63
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %and_lsl63, i64* @var1_64
  %bic_lsl63 = and i64 %val1, %neg_operand_lsl63
; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %bic_lsl63, i64* @var1_64

  %or_lsl63 = or i64 %val1, %operand_lsl63
; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %or_lsl63, i64* @var1_64
  %orn_lsl63 = or i64 %val1, %neg_operand_lsl63
; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %orn_lsl63, i64* @var1_64

  %xor_lsl63 = xor i64 %val1, %operand_lsl63
; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %xor_lsl63, i64* @var1_64
  %xorn_lsl63 = xor i64 %val1, %neg_operand_lsl63
; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
  store volatile i64 %xorn_lsl63, i64* @var1_64

  ; Check other shifts on a subset
  %operand_asr10 = ashr i64 %val2, 10
  %neg_operand_asr10 = xor i64 -1, %operand_asr10

  %bic_asr10 = and i64 %val1, %neg_operand_asr10
; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #10
  store volatile i64 %bic_asr10, i64* @var1_64
  %xor_asr10 = xor i64 %val1, %operand_asr10
; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #10
  store volatile i64 %xor_asr10, i64* @var1_64

  %operand_lsr1 = lshr i64 %val2, 1
  %neg_operand_lsr1 = xor i64 -1, %operand_lsr1

  %orn_lsr1 = or i64 %val1, %neg_operand_lsr1
; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #1
  store volatile i64 %orn_lsr1, i64* @var1_64
  %xor_lsr1 = xor i64 %val1, %operand_lsr1
; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #1
  store volatile i64 %xor_lsr1, i64* @var1_64

  ; Construct a rotate-right from a bunch of other logical
  ; operations. DAGCombiner should ensure we form the ROTR during
  ; selection.
  %operand_ror20_big = shl i64 %val2, 44
  %operand_ror20_small = lshr i64 %val2, 20
  %operand_ror20 = or i64 %operand_ror20_big, %operand_ror20_small
  %neg_operand_ror20 = xor i64 -1, %operand_ror20
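  ; (as in the 32-bit case, 44 + 20 == 64 makes this a rotate right by 20)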

  %xorn_ror20 = xor i64 %val1, %neg_operand_ror20
; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, ror #20
  store volatile i64 %xorn_ror20, i64* @var1_64
  %and_ror20 = and i64 %val1, %operand_ror20
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, ror #20
  store volatile i64 %and_ror20, i64* @var1_64

  ret void
}

define void @flag_setting() {
; CHECK: flag_setting:
  %val1 = load i64* @var1_64
  %val2 = load i64* @var2_64

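  ; (tst is an alias of ands with the zero register as destination, so an and
  ; whose result is only compared against zero should select to it)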
; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}
; CHECK: b.gt .L
  %simple_and = and i64 %val1, %val2
  %tst1 = icmp sgt i64 %simple_and, 0
  br i1 %tst1, label %ret, label %test2

test2:
; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
; CHECK: b.lt .L
  %shifted_op = shl i64 %val2, 63
  %shifted_and = and i64 %val1, %shifted_op
  %tst2 = icmp slt i64 %shifted_and, 0
  br i1 %tst2, label %ret, label %test3

test3:
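  ; (the and operands are commuted here relative to test2, so the shift should
  ; still fold into the tst from the other side)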
; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}, asr #12
; CHECK: b.gt .L
  %asr_op = ashr i64 %val2, 12
  %asr_and = and i64 %asr_op, %val1
  %tst3 = icmp sgt i64 %asr_and, 0
  br i1 %tst3, label %ret, label %other_exit

other_exit:
  store volatile i64 %val1, i64* @var1_64
  ret void
ret:
  ret void
}