; RUN: llc < %s -mtriple=x86_64-linux -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-win32 -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -march=x86 -O0 | FileCheck %s --check-prefix=X32

; GEP indices are interpreted as signed integers, so they
; should be sign-extended to 64 bits on 64-bit targets.
; PR3181
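; test1 takes the index as i32, so on x86-64 it must be sign-extended
; (movslq) before it can be folded into the 64-bit addressing mode.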
define i32 @test1(i32 %t3, i32* %t1) nounwind {
  %t9 = getelementptr i32* %t1, i32 %t3           ; <i32*> [#uses=1]
  %t15 = load i32* %t9                            ; <i32> [#uses=1]
  ret i32 %t15
; X32: test1:
; X32: movl (%eax,%ecx,4), %eax
; X32: ret

; X64: test1:
; X64: movslq %e[[A0:di|cx]], %rax
; X64: movl (%r[[A1:si|dx]],%rax,4), %eax
; X64: ret

}
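; test2 takes the index as i64, so no extension is required; the index
; register feeds the address computation directly on x86-64.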
define i32 @test2(i64 %t3, i32* %t1) nounwind {
  %t9 = getelementptr i32* %t1, i64 %t3           ; <i32*> [#uses=1]
  %t15 = load i32* %t9                            ; <i32> [#uses=1]
  ret i32 %t15
; X32: test2:
; X32: movl (%edx,%ecx,4), %eax
; X32: ret

; X64: test2:
; X64: movl (%r[[A1]],%r[[A0]],4), %eax
; X64: ret
}


; PR4984
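; The constant negative index should be folded into the load's displacement
; (-2) rather than materialized in a separate register.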
define i8 @test3(i8* %start) nounwind {
entry:
  %A = getelementptr i8* %start, i64 -2           ; <i8*> [#uses=1]
  %B = load i8* %A, align 1                       ; <i8> [#uses=1]
  ret i8 %B

; X32: test3:
; X32: movl 4(%esp), %eax
; X32: movb -2(%eax), %al
; X32: ret

; X64: test3:
; X64: movb -2(%r[[A0]]), %al
; X64: ret

}

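; test4 adds a constant 16 to the index; scaled by the 8-byte element size,
; it should show up as a folded displacement of 128 in the address.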
define double @test4(i64 %x, double* %p) nounwind {
entry:
  %x.addr = alloca i64, align 8                   ; <i64*> [#uses=2]
  %p.addr = alloca double*, align 8               ; <double**> [#uses=2]
  store i64 %x, i64* %x.addr
  store double* %p, double** %p.addr
  %tmp = load i64* %x.addr                        ; <i64> [#uses=1]
  %add = add nsw i64 %tmp, 16                     ; <i64> [#uses=1]
  %tmp1 = load double** %p.addr                   ; <double*> [#uses=1]
  %arrayidx = getelementptr inbounds double* %tmp1, i64 %add ; <double*> [#uses=1]
  %tmp2 = load double* %arrayidx                  ; <double> [#uses=1]
  ret double %tmp2

; X32: test4:
; X32: 128(%e{{.*}},%e{{.*}},8)
; X64: test4:
; X64: 128(%r{{.*}},%r{{.*}},8)
}

; PR8961 - Make sure the sext for the GEP addressing comes before the load that
; is folded.
define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind {
  %v8 = getelementptr i8* %A, i32 %I
  %v9 = bitcast i8* %v8 to i64*
  %v10 = load i64* %v9
  %v11 = add i64 %B, %v10
  ret i64 %v11
; X64: test5:
; X64: movslq %e[[A1]], %rax
; X64-NEXT: movq (%r[[A0]],%rax), %rax
; X64-NEXT: addq %{{rdx|r8}}, %rax
; X64-NEXT: ret
}