; RUN: llc < %s -march=x86-64 -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -march=x86 -O0 | FileCheck %s --check-prefix=X32

; GEP indices are interpreted as signed integers, so they
; should be sign-extended to 64 bits on 64-bit targets.
; PR3181
; A 32-bit GEP index on a 64-bit target must be sign-extended before being
; used as a scaled index (movslq on x86-64).
define i32 @test1(i32 %t3, i32* %t1) nounwind {
  %t9 = getelementptr i32* %t1, i32 %t3           ; <i32*> [#uses=1]
  %t15 = load i32* %t9                            ; <i32> [#uses=1]
  ret i32 %t15
; X32: test1:
; X32: movl (%eax,%ecx,4), %eax
; X32: ret

; X64: test1:
; X64: movslq %edi, %rax
; X64: movl (%rsi,%rax,4), %eax
; X64: ret

}
; An index that is already i64 needs no extension: the register is used
; directly as the scaled index.
define i32 @test2(i64 %t3, i32* %t1) nounwind {
  %t9 = getelementptr i32* %t1, i64 %t3           ; <i32*> [#uses=1]
  %t15 = load i32* %t9                            ; <i32> [#uses=1]
  ret i32 %t15
; X32: test2:
; X32: movl (%edx,%ecx,4), %eax
; X32: ret

; X64: test2:
; X64: movl (%rsi,%rdi,4), %eax
; X64: ret
}



; PR4984
; A negative constant GEP offset should fold into the addressing mode as a
; negative displacement (-2(%reg)), not be materialized separately.
define i8 @test3(i8* %start) nounwind {
entry:
  %A = getelementptr i8* %start, i64 -2           ; <i8*> [#uses=1]
  %B = load i8* %A, align 1                       ; <i8> [#uses=1]
  ret i8 %B


; X32: test3:
; X32: movl 4(%esp), %eax
; X32: movb -2(%eax), %al
; X32: ret

; X64: test3:
; X64: movb -2(%rdi), %al
; X64: ret

}

; The constant part of the GEP index (16 doubles = 128 bytes) should fold
; into the displacement of a scaled addressing mode: 128(base,index,8).
define double @test4(i64 %x, double* %p) nounwind {
entry:
  %x.addr = alloca i64, align 8                   ; <i64*> [#uses=2]
  %p.addr = alloca double*, align 8               ; <double**> [#uses=2]
  store i64 %x, i64* %x.addr
  store double* %p, double** %p.addr
  %tmp = load i64* %x.addr                        ; <i64> [#uses=1]
  %add = add nsw i64 %tmp, 16                     ; <i64> [#uses=1]
  %tmp1 = load double** %p.addr                   ; <double*> [#uses=1]
  %arrayidx = getelementptr inbounds double* %tmp1, i64 %add ; <double*> [#uses=1]
  %tmp2 = load double* %arrayidx                  ; <double> [#uses=1]
  ret double %tmp2

; X32: test4:
; X32: 128(%e{{.*}},%e{{.*}},8)
; X64: test4:
; X64: 128(%r{{.*}},%r{{.*}},8)
}