; RUN: llc < %s -O3 -march=thumb -mcpu=cortex-a9 | FileCheck %s -check-prefix=A9

; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
; code.
;
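; Roughly, the loop below corresponds to this C (a hypothetical sketch, not
; the source the IR was generated from); each load address is a fixed
; multiple of %x past the previous one, so LSR can chain the addresses
; instead of materializing x, 2*x, and 3*x in the preheader:
;
;   int simple(int *a, int *b, int x) {
;     int s = 0, *p = a;
;     do {
;       s += p[0] + p[x] + p[2 * x] + p[3 * x];
;       p += 4 * x;               // %iv4: next chain head
;     } while (p != b);
;     return s;
;   }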
; A9: @simple
; no expensive address computation in the preheader
; A9: lsl
; A9-NOT: lsl
; A9: %loop
; no complex address modes
; A9-NOT: lsl
define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind {
entry:
  br label %loop
loop:
  %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32* %iv
  %iv1 = getelementptr inbounds i32* %iv, i32 %x
  %v1 = load i32* %iv1
  %iv2 = getelementptr inbounds i32* %iv1, i32 %x
  %v2 = load i32* %iv2
  %iv3 = getelementptr inbounds i32* %iv2, i32 %x
  %v3 = load i32* %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32* %iv3, i32 %x
  %cmp = icmp eq i32* %iv4, %b
  br i1 %cmp, label %exit, label %loop
exit:
  ret i32 %s4
}

; @user is not currently chained because the IV is live across memory ops.
;
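; A hypothetical C sketch of the loop (again not the generating source): the
; extra store through the base pointer keeps that IV live across the other
; memory operations, which currently blocks chaining, so the stride
; multiples are computed up front and used in complex address modes:
;
;   int user(int *a, int *b, int x) {
;     int s = 0, *p = a;
;     do {
;       s += p[0] + p[x] + p[2 * x] + p[3 * x];
;       *p = s;                   // extra use of the base IV
;       p += 4 * x;
;     } while (p != b);
;     return s;
;   }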
; A9: @user
; stride multiples computed in the preheader
; A9: lsl
; A9: lsl
; A9: %loop
; complex address modes
; A9: lsl
; A9: lsl
define i32 @user(i32* %a, i32* %b, i32 %x) nounwind {
entry:
  br label %loop
loop:
  %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32* %iv
  %iv1 = getelementptr inbounds i32* %iv, i32 %x
  %v1 = load i32* %iv1
  %iv2 = getelementptr inbounds i32* %iv1, i32 %x
  %v2 = load i32* %iv2
  %iv3 = getelementptr inbounds i32* %iv2, i32 %x
  %v3 = load i32* %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32* %iv3, i32 %x
  store i32 %s4, i32* %iv
  %cmp = icmp eq i32* %iv4, %b
  br i1 %cmp, label %exit, label %loop
exit:
  ret i32 %s4
}

; @extrastride is a slightly more interesting case of a single
; complete chain with multiple strides. The test case IR is what LSR
; used to do, and exactly what we don't want to do. LSR's new IV
; chaining feature should now undo the damage.
;
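; A hypothetical C sketch of what the IR below computes (the real source is
; not part of this test): five loads at byte offsets 0, s, 2*s, 3*s, 4*s
; from %main are summed into *%res, then %main advances by 5*s + x bytes
; (the extra stride) and %res by %y elements, %z times:
;
;   void extrastride(char *main, int s, int *res, int x, int y, int z) {
;     for (int i = 0; i < z; ++i) {
;       int sum = 0;
;       for (int j = 0; j < 5; ++j)        // fully unrolled in the IR
;         sum += *(int *)(main + j * s);
;       *res = sum;
;       main += 5 * s + x;
;       res += y;
;     }
;   }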
; A9: extrastride:
; no spills
; A9-NOT: str
; only one stride multiple in the preheader
; A9: lsl
; A9-NOT: {{str r|lsl}}
; A9: %for.body{{$}}
; no complex address modes or reloads
; A9-NOT: {{ldr .*[sp]|lsl}}
define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {
entry:
  %cmp8 = icmp eq i32 %z, 0
  br i1 %cmp8, label %for.end, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  %add.ptr.sum = shl i32 %main_stride, 1 ; s*2
  %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride ; s*3
  %add.ptr2.sum = add i32 %x, %main_stride ; s + x
  %add.ptr4.sum = shl i32 %main_stride, 2 ; s*4
  %add.ptr3.sum = add i32 %add.ptr2.sum, %add.ptr4.sum ; total IV stride = s*5+x
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %main.addr.011 = phi i8* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
  %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
  %0 = bitcast i8* %main.addr.011 to i32*
  %1 = load i32* %0, align 4
  %add.ptr = getelementptr inbounds i8* %main.addr.011, i32 %main_stride
  %2 = bitcast i8* %add.ptr to i32*
  %3 = load i32* %2, align 4
  %add.ptr1 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr.sum
  %4 = bitcast i8* %add.ptr1 to i32*
  %5 = load i32* %4, align 4
  %add.ptr2 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr1.sum
  %6 = bitcast i8* %add.ptr2 to i32*
  %7 = load i32* %6, align 4
  %add.ptr3 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr4.sum
  %8 = bitcast i8* %add.ptr3 to i32*
  %9 = load i32* %8, align 4
  %add = add i32 %3, %1
  %add4 = add i32 %add, %5
  %add5 = add i32 %add4, %7
  %add6 = add i32 %add5, %9
  store i32 %add6, i32* %res.addr.09, align 4
  %add.ptr6 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr3.sum
  %add.ptr7 = getelementptr inbounds i32* %res.addr.09, i32 %y
  %inc = add i32 %i.010, 1
  %cmp = icmp eq i32 %inc, %z
  br i1 %cmp, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; @foldedidx is an unrolled variant of this loop:
;   for (unsigned long i = 0; i < len; i += s) {
;     c[i] = a[i] + b[i];
;   }
; where 's' can be folded into the addressing mode.
; Consequently, we should *not* form any chains.
;
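; In the IR below the step is 1 and the loop is unrolled by 4 with a trip
; count of 400, so the constant offsets 1-3 fold into the load/store
; addressing modes; roughly (hypothetical sketch):
;
;   for (unsigned i = 0; i < 400; i += 4) {
;     c[i]     = a[i]     + b[i];
;     c[i + 1] = a[i + 1] + b[i + 1];   // folds to ldrb/strb ..., [r, #1]
;     c[i + 2] = a[i + 2] + b[i + 2];
;     c[i + 3] = a[i + 3] + b[i + 3];
;   }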
; A9: foldedidx:
; A9: ldrb.w {{r[0-9]|lr}}, [{{r[0-9]|lr}}, #3]
define void @foldedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c) nounwind ssp {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
  %arrayidx = getelementptr inbounds i8* %a, i32 %i.07
  %0 = load i8* %arrayidx, align 1
  %conv5 = zext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.07
  %1 = load i8* %arrayidx1, align 1
  %conv26 = zext i8 %1 to i32
  %add = add nsw i32 %conv26, %conv5
  %conv3 = trunc i32 %add to i8
  %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.07
  store i8 %conv3, i8* %arrayidx4, align 1
  %inc1 = or i32 %i.07, 1
  %arrayidx.1 = getelementptr inbounds i8* %a, i32 %inc1
  %2 = load i8* %arrayidx.1, align 1
  %conv5.1 = zext i8 %2 to i32
  %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %inc1
  %3 = load i8* %arrayidx1.1, align 1
  %conv26.1 = zext i8 %3 to i32
  %add.1 = add nsw i32 %conv26.1, %conv5.1
  %conv3.1 = trunc i32 %add.1 to i8
  %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %inc1
  store i8 %conv3.1, i8* %arrayidx4.1, align 1
  %inc.12 = or i32 %i.07, 2
  %arrayidx.2 = getelementptr inbounds i8* %a, i32 %inc.12
  %4 = load i8* %arrayidx.2, align 1
  %conv5.2 = zext i8 %4 to i32
  %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %inc.12
  %5 = load i8* %arrayidx1.2, align 1
  %conv26.2 = zext i8 %5 to i32
  %add.2 = add nsw i32 %conv26.2, %conv5.2
  %conv3.2 = trunc i32 %add.2 to i8
  %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %inc.12
  store i8 %conv3.2, i8* %arrayidx4.2, align 1
  %inc.23 = or i32 %i.07, 3
  %arrayidx.3 = getelementptr inbounds i8* %a, i32 %inc.23
  %6 = load i8* %arrayidx.3, align 1
  %conv5.3 = zext i8 %6 to i32
  %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %inc.23
  %7 = load i8* %arrayidx1.3, align 1
  %conv26.3 = zext i8 %7 to i32
  %add.3 = add nsw i32 %conv26.3, %conv5.3
  %conv3.3 = trunc i32 %add.3 to i8
  %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %inc.23
  store i8 %conv3.3, i8* %arrayidx4.3, align 1
  %inc.3 = add nsw i32 %i.07, 4
  %exitcond.3 = icmp eq i32 %inc.3, 400
  br i1 %exitcond.3, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; @testNeon is an important example of the need for IV chains.
;
; Currently we have three extra add.w's that keep the store address
; live past the next increment because ISEL is unfortunately undoing
; the store chain. ISEL also fails to convert the stores to
; post-increment addressing. However, the loads should use
; post-increment addressing, with no adds or add.w's beyond the three
; mentioned. Most importantly, there should be no spills or reloads!
;
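; A rough C sketch using NEON intrinsics (a hypothetical reconstruction of
; the kernel, not the original source; note that the IR steps %ref_data and
; %data backwards by 8*stride bytes and 64 vectors per iteration):
;
;   #include <arm_neon.h>
;   void testNeon(const uint8_t *ref, int stride, int limit, uint8x16_t *data) {
;     uint8x16_t acc = vdupq_n_u8(0);
;     for (int i = 0; i < limit; ++i) {
;       for (int j = 0; j < 4; ++j) {          // fully unrolled in the IR
;         uint8x16_t v = vcombine_u8(vld1_u8(ref + (2 * j) * stride),
;                                    vld1_u8(ref + (2 * j + 1) * stride));
;         data[j] = v;                         // these stores feed the add.w's
;         acc = vaddq_u8(acc, v);
;       }
;       ref -= 8 * stride;
;       data -= 64;
;     }
;     *data = acc;
;   }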
; A9: testNeon:
; A9: %.lr.ph
; A9-NOT: lsl.w
; A9-NOT: {{ldr|str|adds|add r}}
; A9: add.w r
; A9-NOT: {{ldr|str|adds|add r}}
; A9: add.w r
; A9-NOT: {{ldr|str|adds|add r}}
; A9: add.w r
; A9-NOT: {{ldr|str|adds|add r}}
; A9-NOT: add.w r
; A9: bne
define hidden void @testNeon(i8* %ref_data, i32 %ref_stride, i32 %limit, <16 x i8>* nocapture %data) nounwind optsize {
  %1 = icmp sgt i32 %limit, 0
  br i1 %1, label %.lr.ph, label %45

.lr.ph:                                           ; preds = %0
  %2 = shl nsw i32 %ref_stride, 1
  %3 = mul nsw i32 %ref_stride, 3
  %4 = shl nsw i32 %ref_stride, 2
  %5 = mul nsw i32 %ref_stride, 5
  %6 = mul nsw i32 %ref_stride, 6
  %7 = mul nsw i32 %ref_stride, 7
  %8 = shl nsw i32 %ref_stride, 3
  %9 = sub i32 0, %8
  %10 = mul i32 %limit, -64
  br label %11

; <label>:11                                      ; preds = %11, %.lr.ph
  %.05 = phi i8* [ %ref_data, %.lr.ph ], [ %42, %11 ]
  %counter.04 = phi i32 [ 0, %.lr.ph ], [ %44, %11 ]
  %result.03 = phi <16 x i8> [ zeroinitializer, %.lr.ph ], [ %41, %11 ]
  %.012 = phi <16 x i8>* [ %data, %.lr.ph ], [ %43, %11 ]
  %12 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %.05, i32 1) nounwind
  %13 = getelementptr inbounds i8* %.05, i32 %ref_stride
  %14 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %13, i32 1) nounwind
  %15 = shufflevector <1 x i64> %12, <1 x i64> %14, <2 x i32> <i32 0, i32 1>
  %16 = bitcast <2 x i64> %15 to <16 x i8>
  %17 = getelementptr inbounds <16 x i8>* %.012, i32 1
  store <16 x i8> %16, <16 x i8>* %.012, align 4
  %18 = getelementptr inbounds i8* %.05, i32 %2
  %19 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %18, i32 1) nounwind
  %20 = getelementptr inbounds i8* %.05, i32 %3
  %21 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %20, i32 1) nounwind
  %22 = shufflevector <1 x i64> %19, <1 x i64> %21, <2 x i32> <i32 0, i32 1>
  %23 = bitcast <2 x i64> %22 to <16 x i8>
  %24 = getelementptr inbounds <16 x i8>* %.012, i32 2
  store <16 x i8> %23, <16 x i8>* %17, align 4
  %25 = getelementptr inbounds i8* %.05, i32 %4
  %26 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %25, i32 1) nounwind
  %27 = getelementptr inbounds i8* %.05, i32 %5
  %28 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %27, i32 1) nounwind
  %29 = shufflevector <1 x i64> %26, <1 x i64> %28, <2 x i32> <i32 0, i32 1>
  %30 = bitcast <2 x i64> %29 to <16 x i8>
  %31 = getelementptr inbounds <16 x i8>* %.012, i32 3
  store <16 x i8> %30, <16 x i8>* %24, align 4
  %32 = getelementptr inbounds i8* %.05, i32 %6
  %33 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %32, i32 1) nounwind
  %34 = getelementptr inbounds i8* %.05, i32 %7
  %35 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %34, i32 1) nounwind
  %36 = shufflevector <1 x i64> %33, <1 x i64> %35, <2 x i32> <i32 0, i32 1>
  %37 = bitcast <2 x i64> %36 to <16 x i8>
  store <16 x i8> %37, <16 x i8>* %31, align 4
  %38 = add <16 x i8> %16, %23
  %39 = add <16 x i8> %38, %30
  %40 = add <16 x i8> %39, %37
  %41 = add <16 x i8> %result.03, %40
  %42 = getelementptr i8* %.05, i32 %9
  %43 = getelementptr inbounds <16 x i8>* %.012, i32 -64
  %44 = add nsw i32 %counter.04, 1
  %exitcond = icmp eq i32 %44, %limit
  br i1 %exitcond, label %._crit_edge, label %11

._crit_edge:                                      ; preds = %11
  %scevgep = getelementptr <16 x i8>* %data, i32 %10
  br label %45

; <label>:45                                      ; preds = %._crit_edge, %0
  %result.0.lcssa = phi <16 x i8> [ %41, %._crit_edge ], [ zeroinitializer, %0 ]
  %.01.lcssa = phi <16 x i8>* [ %scevgep, %._crit_edge ], [ %data, %0 ]
  store <16 x i8> %result.0.lcssa, <16 x i8>* %.01.lcssa, align 4
  ret void
}

declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*, i32) nounwind readonly