; RUN: llc < %s -mtriple=armv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=A8
; RUN: llc < %s -mtriple=armv7-apple-darwin -mcpu=cortex-a9 | FileCheck %s -check-prefix=A9
; rdar://8576755
; Shift-by-register feeding an add: the shift should fold into the add as a
; shifter operand (add rd, rn, rm, lsl rs) on both Cortex-A8 and Cortex-A9.
define i32 @test1(i32 %X, i32 %Y, i8 %sh) {
; A8: test1:
; A8: add r0, r0, r1, lsl r2

; A9: test1:
; A9: add r0, r0, r1, lsl r2
	%shift.upgrd.1 = zext i8 %sh to i32
	%A = shl i32 %Y, %shift.upgrd.1
	%B = add i32 %X, %A
	ret i32 %B
}
; (X & ~(Y >>s sh)) should select BIC with a register-shifted (asr) operand
; on both Cortex-A8 and Cortex-A9.
define i32 @test2(i32 %X, i32 %Y, i8 %sh) {
; A8: test2:
; A8: bic r0, r0, r1, asr r2

; A9: test2:
; A9: bic r0, r0, r1, asr r2
	%shift.upgrd.2 = zext i8 %sh to i32
	%A = ashr i32 %Y, %shift.upgrd.2
	%B = xor i32 %A, -1
	%C = and i32 %X, %B
	ret i32 %C
}
; A shifted index used by two loads: expect the lsl #2 to fold into both
; load addressing modes ([rn, rm, lsl #2]) rather than be materialized once.
define i32 @test3(i32 %base, i32 %base2, i32 %offset) {
entry:
; A8: test3:
; A8: ldr r0, [r0, r2, lsl #2]
; A8: ldr r1, [r1, r2, lsl #2]

; lsl #2 is free
; A9: test3:
; A9: ldr r1, [r1, r2, lsl #2]
; A9: ldr r0, [r0, r2, lsl #2]
	%tmp1 = shl i32 %offset, 2
	%tmp2 = add i32 %base, %tmp1
	%tmp3 = inttoptr i32 %tmp2 to i32*
	%tmp4 = add i32 %base2, %tmp1
	%tmp5 = inttoptr i32 %tmp4 to i32*
	%tmp6 = load i32* %tmp3
	%tmp7 = load i32* %tmp5
	%tmp8 = add i32 %tmp7, %tmp6
	ret i32 %tmp8
}
declare i8* @malloc(...)
; Same shifted-index address feeding a load AND a store: on A8 the shift
; folds into both addressing modes; on A9 the address is computed once with
; an add (lsl #2 shifter operand) and reused by plain ldr/str.
define fastcc void @test4() nounwind {
entry:
; A8: test4:
; A8: ldr r1, [r0, r0, lsl #2]
; A8: str r1, [r0, r0, lsl #2]

; A9: test4:
; A9: add r0, r0, r0, lsl #2
; A9: ldr r1, [r0]
; A9: str r1, [r0]
	%0 = tail call i8* (...)* @malloc(i32 undef) nounwind
	%1 = bitcast i8* %0 to i32*
	%2 = sext i16 undef to i32
	%3 = getelementptr inbounds i32* %1, i32 %2
	%4 = load i32* %3, align 4
	%5 = add nsw i32 %4, 1
	store i32 %5, i32* %3, align 4
	ret void
}