; RUN: llc < %s -mtriple=x86_64-linux -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-win32 -O0 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -march=x86 -O0 | FileCheck %s --check-prefix=X32

; GEP indices are interpreted as signed integers, so they
; should be sign-extended to 64 bits on 64-bit targets.
; PR3181
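; test1 indexes with an i32 on 64-bit targets, so the X64 checks below expect a
; movslq to sign-extend the index before it is used in the scaled address.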
define i32 @test1(i32 %t3, i32* %t1) nounwind {
  %t9 = getelementptr i32* %t1, i32 %t3           ; <i32*> [#uses=1]
  %t15 = load i32* %t9                            ; <i32> [#uses=1]
  ret i32 %t15
; X32: test1:
; X32: movl (%eax,%ecx,4), %eax
; X32: ret

; X64: test1:
; X64: movslq %e[[A0:di|cx]], %rax
; X64: movl (%r[[A1:si|dx]],%rax,4), %eax
; X64: ret
}
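; test2 is the same pattern with an i64 index, so no sign extension should be
; needed before the index is folded into the address.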
define i32 @test2(i64 %t3, i32* %t1) nounwind {
  %t9 = getelementptr i32* %t1, i64 %t3           ; <i32*> [#uses=1]
  %t15 = load i32* %t9                            ; <i32> [#uses=1]
  ret i32 %t15
; X32: test2:
; X32: movl (%edx,%ecx,4), %e
; X32: ret

; X64: test2:
; X64: movl (%r[[A1]],%r[[A0]],4), %eax
; X64: ret
}

; PR4984
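; test3 uses a constant negative index; the -2 should fold directly into the
; displacement of the byte load.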
define i8 @test3(i8* %start) nounwind {
entry:
  %A = getelementptr i8* %start, i64 -2           ; <i8*> [#uses=1]
  %B = load i8* %A, align 1                       ; <i8> [#uses=1]
  ret i8 %B

; X32: test3:
; X32: movl 4(%esp), %eax
; X32: movb -2(%eax), %al
; X32: ret

; X64: test3:
; X64: movb -2(%r[[A0]]), %al
; X64: ret
}

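; test4: the constant part of the index (%x + 16, scaled by 8-byte doubles)
; should fold into a 128-byte displacement with a scale of 8.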
define double @test4(i64 %x, double* %p) nounwind {
entry:
  %x.addr = alloca i64, align 8                   ; <i64*> [#uses=2]
  %p.addr = alloca double*, align 8               ; <double**> [#uses=2]
  store i64 %x, i64* %x.addr
  store double* %p, double** %p.addr
  %tmp = load i64* %x.addr                        ; <i64> [#uses=1]
  %add = add nsw i64 %tmp, 16                     ; <i64> [#uses=1]
  %tmp1 = load double** %p.addr                   ; <double*> [#uses=1]
  %arrayidx = getelementptr inbounds double* %tmp1, i64 %add ; <double*> [#uses=1]
  %tmp2 = load double* %arrayidx                  ; <double> [#uses=1]
  ret double %tmp2

; X32: test4:
; X32: 128(%e{{.*}},%e{{.*}},8)
; X64: test4:
; X64: 128(%r{{.*}},%r{{.*}},8)
}

; PR8961 - Make sure the sext for the GEP addressing comes before the load that
; is folded.
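; The X64-NEXT line requires the folded load to appear immediately after the
; movslq, which proves the sign extension is emitted first.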
define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind {
  %v8 = getelementptr i8* %A, i32 %I
  %v9 = bitcast i8* %v8 to i64*
  %v10 = load i64* %v9
  %v11 = add i64 %B, %v10
  ret i64 %v11
; X64: test5:
; X64: movslq %e[[A1]], %rax
; X64-NEXT: (%r[[A0]],%rax),
; X64: ret
}

; PR9500, rdar://9156159 - Don't do non-local address mode folding,
; because it may require values which wouldn't otherwise be live out
; of their blocks.
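; Here the GEP in %invoke.cont16 uses %dec, which is defined in %if.end; folding
; the add into that address would force %tmp15 to be live out of %if.end.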
define void @test6() {
if.end:                                           ; preds = %if.then, %invoke.cont
  %tmp15 = load i64* undef
  %dec = add i64 %tmp15, 13
  store i64 %dec, i64* undef
  %call17 = invoke i8* @_ZNK18G__FastAllocString4dataEv()
          to label %invoke.cont16 unwind label %lpad

invoke.cont16:                                    ; preds = %if.then14
  %arrayidx18 = getelementptr inbounds i8* %call17, i64 %dec
  store i8 0, i8* %arrayidx18
  unreachable

lpad:                                             ; preds = %if.end19, %if.then14, %if.end, %entry
  %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
          cleanup
  unreachable
}

declare i8* @_ZNK18G__FastAllocString4dataEv() nounwind

; PR10605 / rdar://9930964 - Don't fold loads incorrectly. The load should
; happen before the store.
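; %tmp29 and %p2 address the same field, so folding the load past the store
; would make it read the 4 that was just stored instead of the original value.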
define i32 @test7({i32,i32,i32}* %tmp1, i32 %tmp71, i32 %tmp63) nounwind {
; X64: test7:
; X64: movl 8({{%rdi|%rcx}}), %eax
; X64: movl $4, 8({{%rdi|%rcx}})

  %tmp29 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
  %tmp30 = load i32* %tmp29, align 4

  %p2 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
  store i32 4, i32* %p2

  %tmp72 = or i32 %tmp71, %tmp30
  %tmp73 = icmp ne i32 %tmp63, 32
  br i1 %tmp73, label %T, label %F

T:
  ret i32 %tmp72

F:
  ret i32 4
}

declare i32 @__gxx_personality_v0(...)