; RUN: llc < %s -march=arm64 -mcpu=cyclone -enable-misched=false | FileCheck %s
; RUN: llc < %s -O0 | FileCheck -check-prefix=FAST %s
target triple = "arm64-apple-darwin"

; rdar://9932559
define i64 @i8i16callee(i64 %a1, i64 %a2, i64 %a3, i8 signext %a4, i16 signext %a5, i64 %a6, i64 %a7, i64 %a8, i8 signext %b1, i16 signext %b2, i8 signext %b3, i8 signext %b4) nounwind readnone noinline {
entry:
; CHECK-LABEL: i8i16callee:
; The stack arguments %b1, %b2, %b3 and %b4 are passed at sp, sp+2, sp+4 and sp+5.
; They are i8, i16, i8 and i8.
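; A sketch of the incoming stack area implied by the CHECK lines below (offsets
; taken from those loads; each argument is packed to its natural alignment):
;   [sp]     i8  %b1
;   [sp, #2] i16 %b2   (2-byte aligned, so one pad byte after %b1)
;   [sp, #4] i8  %b3
;   [sp, #5] i8  %b4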
; CHECK: ldrsb {{w[0-9]+}}, [sp, #5]
; CHECK: ldrsh {{w[0-9]+}}, [sp, #2]
; CHECK: ldrsb {{w[0-9]+}}, [sp]
; CHECK: ldrsb {{w[0-9]+}}, [sp, #4]
; FAST-LABEL: i8i16callee:
; FAST: ldrb {{w[0-9]+}}, [sp, #5]
; FAST: ldrb {{w[0-9]+}}, [sp, #4]
; FAST: ldrh {{w[0-9]+}}, [sp, #2]
; FAST: ldrb {{w[0-9]+}}, [sp]
  %conv = sext i8 %a4 to i64
  %conv3 = sext i16 %a5 to i64
  %conv8 = sext i8 %b1 to i64
  %conv9 = sext i16 %b2 to i64
  %conv11 = sext i8 %b3 to i64
  %conv13 = sext i8 %b4 to i64
  %add10 = add i64 %a2, %a1
  %add12 = add i64 %add10, %a3
  %add14 = add i64 %add12, %conv
  %add = add i64 %add14, %conv3
  %add1 = add i64 %add, %a6
  %add2 = add i64 %add1, %a7
  %add4 = add i64 %add2, %a8
  %add5 = add i64 %add4, %conv8
  %add6 = add i64 %add5, %conv9
  %add7 = add i64 %add6, %conv11
  %add15 = add i64 %add7, %conv13
  %sext = shl i64 %add15, 32
  %conv17 = ashr exact i64 %sext, 32
  ret i64 %conv17
}

define i32 @i8i16caller() nounwind readnone {
entry:
; CHECK: i8i16caller
; The last four arguments are passed on the stack at sp, sp+2, sp+4 and sp+5.
; They are i8, i16, i8 and i8.
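; For reference, the stacked constants from the call below should land as the
; stores indicate: 97 at [sp], 98 at [sp, #2], 99 at [sp, #4] and 100 at [sp, #5],
; mirroring the loads checked in i8i16callee above.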
; CHECK: strb {{w[0-9]+}}, [sp, #5]
; CHECK: strb {{w[0-9]+}}, [sp, #4]
; CHECK: strh {{w[0-9]+}}, [sp, #2]
; CHECK: strb {{w[0-9]+}}, [sp]
; CHECK: bl
; FAST: i8i16caller
; FAST: strb {{w[0-9]+}}, [sp]
; FAST: strh {{w[0-9]+}}, [sp, #2]
; FAST: strb {{w[0-9]+}}, [sp, #4]
; FAST: strb {{w[0-9]+}}, [sp, #5]
; FAST: bl
  %call = tail call i64 @i8i16callee(i64 0, i64 1, i64 2, i8 signext 3, i16 signext 4, i64 5, i64 6, i64 7, i8 signext 97, i16 signext 98, i8 signext 99, i8 signext 100)
  %conv = trunc i64 %call to i32
  ret i32 %conv
}

; rdar://12651543
define double @circle_center([2 x float] %a) nounwind ssp {
  %call = tail call double @ext([2 x float] %a) nounwind
; CHECK: circle_center
; CHECK: bl
  ret double %call
}
declare double @ext([2 x float])

; rdar://12656141
; A 16-byte vector should be 16-byte aligned when passed on the stack.
; A double argument is passed on the stack first, so the vector should be at sp+16.
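; Rough picture of the outgoing arguments (a sketch; only the q-register store at
; [sp, #16] is actually checked): v0-v7 take the leading double and the first seven
; vectors, the second double 3.0 goes at [sp], and the last <4 x i32> is padded up
; to [sp, #16] to keep its 16-byte alignment.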
define double @fixed_4i(<4 x i32>* nocapture %in) nounwind {
entry:
; CHECK: fixed_4i
; CHECK: str [[REG_1:q[0-9]+]], [sp, #16]
; FAST: fixed_4i
; FAST: sub sp, sp, #64
; FAST: mov x[[ADDR:[0-9]+]], sp
; FAST: str [[REG_1:q[0-9]+]], [x[[ADDR]], #16]
  %0 = load <4 x i32>* %in, align 16
  %call = tail call double @args_vec_4i(double 3.000000e+00, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, double 3.000000e+00, <4 x i32> %0, i8 signext 3)
  ret double %call
}
declare double @args_vec_4i(double, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, double, <4 x i32>, i8 signext)

; rdar://12695237
; d8 at sp, i in register w0.
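; %f1 and %d1-%d7 use up eight FP registers, so the ninth FP argument %d8 spills
; to the stack at [sp]; %i is the only integer argument and stays in w0, which is
; what the scvtf from w0 below checks.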
@g_d = common global double 0.000000e+00, align 8
define void @test1(float %f1, double %d1, double %d2, double %d3, double %d4,
                   double %d5, double %d6, double %d7, double %d8, i32 %i) nounwind ssp {
entry:
; CHECK: test1
; CHECK: ldr [[REG_1:d[0-9]+]], [sp]
; CHECK: scvtf [[REG_2:s[0-9]+]], w0
; CHECK: fadd s0, [[REG_2]], s0
  %conv = sitofp i32 %i to float
  %add = fadd float %conv, %f1
  %conv1 = fpext float %add to double
  %add2 = fadd double %conv1, %d7
  %add3 = fadd double %add2, %d8
  store double %add3, double* @g_d, align 8
  ret void
}

; i9 at sp, d1 in register s0.
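; %i1-%i8 fill w0-w7, so the ninth integer argument %i9 spills to the stack at
; [sp]; %d1 is the only FP argument and stays in s0, as the CHECK lines below
; verify.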
define void @test2(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6,
                   i32 %i7, i32 %i8, i32 %i9, float %d1) nounwind ssp {
entry:
; CHECK: test2
; CHECK: scvtf [[REG_2:s[0-9]+]], w0
; CHECK: fadd s0, [[REG_2]], s0
; CHECK: ldr [[REG_1:s[0-9]+]], [sp]
  %conv = sitofp i32 %i1 to float
  %add = fadd float %conv, %d1
  %conv1 = fpext float %add to double
  %conv2 = sitofp i32 %i8 to double
  %add3 = fadd double %conv2, %conv1
  %conv4 = sitofp i32 %i9 to double
  %add5 = fadd double %conv4, %add3
  store double %add5, double* @g_d, align 8
  ret void
}
; rdar://12648441
; Check stack alignment for v64, f64, i64, f32 and i32 arguments (test3 through test7).
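; In test3 below, the float 3.0 is the ninth FP argument and lands at [sp]; the
; following <2 x i32> needs 8-byte alignment, so it is stored at [sp, #8] as the
; d-register store in the CHECK lines shows.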
define double @test3(<2 x i32>* nocapture %in) nounwind {
entry:
; CHECK: test3
; CHECK: str [[REG_1:d[0-9]+]], [sp, #8]
; FAST: test3
; FAST: sub sp, sp, #32
; FAST: mov x[[ADDR:[0-9]+]], sp
; FAST: str [[REG_1:d[0-9]+]], [x[[ADDR]], #8]
  %0 = load <2 x i32>* %in, align 8
  %call = tail call double @args_vec_2i(double 3.000000e+00, <2 x i32> %0,
              <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0,
              <2 x i32> %0, float 3.000000e+00, <2 x i32> %0, i8 signext 3)
  ret double %call
}
declare double @args_vec_2i(double, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>,
              <2 x i32>, <2 x i32>, <2 x i32>, float, <2 x i32>, i8 signext)

define double @test4(double* nocapture %in) nounwind {
entry:
; CHECK: test4
; CHECK: str [[REG_1:d[0-9]+]], [sp, #8]
; CHECK: str [[REG_2:w[0-9]+]], [sp]
; CHECK: orr w0, wzr, #0x3
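; The eight leading doubles occupy d0-d7, so the float 3.0 is stored at [sp]
; (through a w register, as the second store above shows) and the trailing double
; at [sp, #8]; the i8 argument still fits in a GPR, hence the orr that materialises
; 3 into w0.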
  %0 = load double* %in, align 8
  %call = tail call double @args_f64(double 3.000000e+00, double %0, double %0,
              double %0, double %0, double %0, double %0, double %0,
              float 3.000000e+00, double %0, i8 signext 3)
  ret double %call
}
declare double @args_f64(double, double, double, double, double, double, double,
              double, float, double, i8 signext)

define i64 @test5(i64* nocapture %in) nounwind {
entry:
; CHECK: test5
; CHECK: strb [[REG_3:w[0-9]+]], [sp, #16]
; CHECK: str [[REG_1:x[0-9]+]], [sp, #8]
; CHECK: str [[REG_2:w[0-9]+]], [sp]
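; The first eight i64 arguments take x0-x7, so the i32 goes at [sp], the next i64
; is 8-byte aligned at [sp, #8], and the final i8 is stored as a single byte at
; [sp, #16], matching the stores checked above.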
  %0 = load i64* %in, align 8
  %call = tail call i64 @args_i64(i64 3, i64 %0, i64 %0, i64 %0, i64 %0, i64 %0,
              i64 %0, i64 %0, i32 3, i64 %0, i8 signext 3)
  ret i64 %call
}
declare i64 @args_i64(i64, i64, i64, i64, i64, i64, i64, i64, i32, i64,
              i8 signext)

define i32 @test6(float* nocapture %in) nounwind {
entry:
; CHECK: test6
; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8]
; CHECK: str [[REG_1:s[0-9]+]], [sp, #4]
; CHECK: strh [[REG_3:w[0-9]+]], [sp]
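; With w0-w7 and s0-s7 both exhausted, the i16 goes at [sp], the float is 4-byte
; aligned at [sp, #4], and the i8 lands at [sp, #8].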
  %0 = load float* %in, align 4
  %call = tail call i32 @args_f32(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
              i32 7, i32 8, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0,
              float 6.0, float 7.0, float 8.0, i16 signext 3, float %0,
              i8 signext 3)
  ret i32 %call
}
declare i32 @args_f32(i32, i32, i32, i32, i32, i32, i32, i32,
              float, float, float, float, float, float, float, float,
              i16 signext, float, i8 signext)

define i32 @test7(i32* nocapture %in) nounwind {
entry:
; CHECK: test7
; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8]
; CHECK: str [[REG_1:w[0-9]+]], [sp, #4]
; CHECK: strh [[REG_3:w[0-9]+]], [sp]
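; Same packing with only GPR arguments: the i16 at [sp], the i32 at [sp, #4], and
; the i8 at [sp, #8].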
  %0 = load i32* %in, align 4
  %call = tail call i32 @args_i32(i32 3, i32 %0, i32 %0, i32 %0, i32 %0, i32 %0,
              i32 %0, i32 %0, i16 signext 3, i32 %0, i8 signext 4)
  ret i32 %call
}
declare i32 @args_i32(i32, i32, i32, i32, i32, i32, i32, i32, i16 signext, i32,
              i8 signext)

define i32 @test8(i32 %argc, i8** nocapture %argv) nounwind {
entry:
; CHECK: test8
; CHECK: strb {{w[0-9]+}}, [sp, #3]
; CHECK: strb wzr, [sp, #2]
; CHECK: strb {{w[0-9]+}}, [sp, #1]
; CHECK: strb wzr, [sp]
; CHECK: bl
; FAST: test8
; FAST: strb {{w[0-9]+}}, [sp]
; FAST: strb {{w[0-9]+}}, [sp, #1]
; FAST: strb {{w[0-9]+}}, [sp, #2]
; FAST: strb {{w[0-9]+}}, [sp, #3]
; FAST: bl
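; The first eight i1 arguments go in w0-w7; the remaining four are each stored as
; a single byte at [sp] through [sp, #3] (the false values via wzr in the CHECK
; lines above).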
  tail call void @args_i1(i1 zeroext false, i1 zeroext true, i1 zeroext false,
              i1 zeroext true, i1 zeroext false, i1 zeroext true,
              i1 zeroext false, i1 zeroext true, i1 zeroext false,
              i1 zeroext true, i1 zeroext false, i1 zeroext true)
  ret i32 0
}

declare void @args_i1(i1 zeroext, i1 zeroext, i1 zeroext, i1 zeroext,
              i1 zeroext, i1 zeroext, i1 zeroext, i1 zeroext,
              i1 zeroext, i1 zeroext, i1 zeroext, i1 zeroext)

define i32 @i1_stack_incoming(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f,
                              i64 %g, i64 %h, i64 %i, i1 zeroext %j) {
; CHECK-LABEL: i1_stack_incoming:
; CHECK: ldrb w0, [sp, #8]
; CHECK: ret
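; %a-%h fill x0-x7, %i takes [sp], and the zeroext i1 %j arrives as a byte at
; [sp, #8], hence the single ldrb checked above.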
  %v = zext i1 %j to i32
  ret i32 %v
}