; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN
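; -fast-isel-abort makes llc fail rather than fall back to SelectionDAG when
; fast-isel cannot select an instruction; -arm-strict-align disallows unaligned
; memory accesses, so they must be expanded into byte-sized operations.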

; Very basic fast-isel functionality.
define i32 @add(i32 %a, i32 %b) nounwind {
entry:
  %a.addr = alloca i32, align 4
  %b.addr = alloca i32, align 4
  store i32 %a, i32* %a.addr
  store i32 %b, i32* %b.addr
  %tmp = load i32* %a.addr
  %tmp1 = load i32* %b.addr
  %add = add nsw i32 %tmp, %tmp1
  ret i32 %add
}

; Check truncate to bool
define void @test1(i32 %tmp) nounwind {
entry:
  %tobool = trunc i32 %tmp to i1
  br i1 %tobool, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  call void @test1(i32 0)
  br label %if.end

if.end:                                           ; preds = %if.then, %entry
  ret void
; ARM: test1:
; ARM: tst r0, #1
; THUMB: test1:
; THUMB: tst.w r0, #1
}
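; A trunc to i1 only guarantees bit 0, so the conditional branch above is
; expected to be selected as a test of bit 0 (tst ... #1) plus a branch.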

; Check some simple operations with immediates
define void @test2(i32 %tmp, i32* %ptr) nounwind {
; THUMB: test2:
; ARM: test2:

b1:
  %a = add i32 %tmp, 4096
  store i32 %a, i32* %ptr
  br label %b2

; THUMB: add.w {{.*}} #4096
; ARM: add {{.*}} #4096

b2:
  %b = add i32 %tmp, 4095
  store i32 %b, i32* %ptr
  br label %b3
; THUMB: addw {{.*}} #4095
; ARM: movw {{.*}} #4095
; ARM: add

b3:
  %c = or i32 %tmp, 4
  store i32 %c, i32* %ptr
  ret void

; THUMB: orr {{.*}} #4
; ARM: orr {{.*}} #4
}
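; 4096 (0x1000) fits in an ARM modified immediate (a rotated 8-bit value), so a
; single add suffices in both modes.  4095 (0xfff) does not, so ARM mode first
; materializes it with movw, while Thumb-2 can use addw with its 12-bit
; immediate encoding.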

define void @test3(i32 %tmp, i32* %ptr1, i16* %ptr2, i8* %ptr3) nounwind {
; THUMB: test3:
; ARM: test3:

bb1:
  %a1 = trunc i32 %tmp to i16
  %a2 = trunc i16 %a1 to i8
  %a3 = trunc i8 %a2 to i1
  %a4 = zext i1 %a3 to i8
  store i8 %a4, i8* %ptr3
  %a5 = zext i8 %a4 to i16
  store i16 %a5, i16* %ptr2
  %a6 = zext i16 %a5 to i32
  store i32 %a6, i32* %ptr1
  br label %bb2

; THUMB: and
; THUMB: strb
; THUMB: uxtb
; THUMB: strh
; THUMB: uxth
; ARM: and
; ARM: strb
; ARM: uxtb
; ARM: strh
; ARM: uxth

bb2:
  %b1 = trunc i32 %tmp to i16
  %b2 = trunc i16 %b1 to i8
  store i8 %b2, i8* %ptr3
  %b3 = sext i8 %b2 to i16
  store i16 %b3, i16* %ptr2
  %b4 = sext i16 %b3 to i32
  store i32 %b4, i32* %ptr1
  br label %bb3

; THUMB: strb
; THUMB: sxtb
; THUMB: strh
; THUMB: sxth
; ARM: strb
; ARM: sxtb
; ARM: strh
; ARM: sxth

bb3:
  %c1 = load i8* %ptr3
  %c2 = load i16* %ptr2
  %c3 = load i32* %ptr1
  %c4 = zext i8 %c1 to i32
  %c5 = sext i16 %c2 to i32
  %c6 = add i32 %c4, %c5
  %c7 = sub i32 %c3, %c6
  store i32 %c7, i32* %ptr1
  ret void

; THUMB: ldrb
; THUMB: ldrh
; THUMB: uxtb
; THUMB: sxth
; THUMB: add
; THUMB: sub
; ARM: ldrb
; ARM: ldrh
; ARM: uxtb
; ARM: sxth
; ARM: add
; ARM: sub
}
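; Zero extension of the narrow values is expected to select uxtb/uxth and sign
; extension sxtb/sxth; the zext from i1 becomes an explicit mask (and ... #1).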

; Check loads/stores with globals
@test4g = external global i32

define void @test4() {
  %a = load i32* @test4g
  %b = add i32 %a, 1
  store i32 %b, i32* @test4g
  ret void


; Note that relocations are either movw/movt or constant pool
; loads. Different platforms will select different approaches.

; THUMB: {{(movw r0, :lower16:L_test4g\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_test4g\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: ldr r1, [r0]
; THUMB: adds r1, #1
; THUMB: str r1, [r0]

; ARM: {{(movw r0, :lower16:L_test4g\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM: {{(movt r0, :upper16:L_test4g\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: ldr r1, [r0]
; ARM: add r1, r1, #1
; ARM: str r1, [r0]
}

; Check unaligned stores
%struct.anon = type <{ float }>

@a = common global %struct.anon* null, align 4

define void @unaligned_store(float %x, float %y) nounwind {
entry:
; ARM: @unaligned_store
; ARM: vmov r1, s0
; ARM: str r1, [r0]

; THUMB: @unaligned_store
; THUMB: vmov r1, s0
; THUMB: str r1, [r0]

  %add = fadd float %x, %y
  %0 = load %struct.anon** @a, align 4
  %x1 = getelementptr inbounds %struct.anon* %0, i32 0, i32 0
  store float %add, float* %x1, align 1
  ret void
}
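; Without strict alignment the float is expected to be moved to a core register
; with vmov and stored with a single str rather than split into byte stores.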

; Doublewords require only word-alignment.
; rdar://10528060
%struct.anon.0 = type { double }

@foo_unpacked = common global %struct.anon.0 zeroinitializer, align 4

define void @test5(double %a, double %b) nounwind {
entry:
; ARM: @test5
; THUMB: @test5
  %add = fadd double %a, %b
  store double %add, double* getelementptr inbounds (%struct.anon.0* @foo_unpacked, i32 0, i32 0), align 4
; ARM: vstr d16, [r0]
; THUMB: vstr d16, [r0]
  ret void
}

; Check unaligned loads of floats
%class.TAlignTest = type <{ i16, float }>

define zeroext i1 @test6(%class.TAlignTest* %this) nounwind align 2 {
entry:
; ARM: @test6
; THUMB: @test6
  %0 = alloca %class.TAlignTest*, align 4
  store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4
  %1 = load %class.TAlignTest** %0
  %2 = getelementptr inbounds %class.TAlignTest* %1, i32 0, i32 1
  %3 = load float* %2, align 1
  %4 = fcmp une float %3, 0.000000e+00
; ARM: ldr r0, [r0, #2]
; ARM: vmov s0, r0
; ARM: vcmpe.f32 s0, #0
; THUMB: ldr.w r0, [r0, #2]
; THUMB: vmov s0, r0
; THUMB: vcmpe.f32 s0, #0
  ret i1 %4
}
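; The float field sits at offset 2 of the packed struct, so the load is only
; 2-byte aligned; it should still be a single ldr, with a vmov to move the
; value into an FP register for the compare.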

; ARM: @urem_fold
; THUMB: @urem_fold
; ARM: and r0, r0, #31
; THUMB: and r0, r0, #31
define i32 @urem_fold(i32 %a) nounwind {
  %rem = urem i32 %a, 32
  ret i32 %rem
}
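; Unsigned remainder by a power of two folds to a mask: x urem 32 == x and 31.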

define i32 @test7() noreturn nounwind {
entry:
; ARM: @test7
; THUMB: @test7
; ARM: trap
; THUMB: trap
  tail call void @llvm.trap( )
  unreachable
}

declare void @llvm.trap() nounwind

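; With strict alignment enforced, the unaligned halfword and word accesses
; below are expected to be expanded into two and four byte-sized accesses
; respectively.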
define void @unaligned_i16_store(i16 %x, i16* %y) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i16_store
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb

; THUMB-STRICT-ALIGN: @unaligned_i16_store
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb

  store i16 %x, i16* %y, align 1
  ret void
}

define i16 @unaligned_i16_load(i16* %x) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i16_load
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb

; THUMB-STRICT-ALIGN: @unaligned_i16_load
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb

  %0 = load i16* %x, align 1
  ret i16 %0
}

define void @unaligned_i32_store(i32 %x, i32* %y) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i32_store
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb

; THUMB-STRICT-ALIGN: @unaligned_i32_store
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb

  store i32 %x, i32* %y, align 1
  ret void
}

define i32 @unaligned_i32_load(i32* %x) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i32_load
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb

; THUMB-STRICT-ALIGN: @unaligned_i32_load
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb

  %0 = load i32* %x, align 1
  ret i32 %0
}