; RUN: llc < %s -mtriple=thumbv7-none-eabi -mcpu=cortex-m3 | FileCheck %s -check-prefix=CHECK -check-prefix=NONE
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=VFP4-ALL
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=FP-ARMv8
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP4-ALL -check-prefix=VFP4-DP
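; This test covers the basic float and double operations in four
; configurations:
;   NONE:    soft-float ABI, no FPU (cortex-m3); every operation becomes an
;            AEABI library call.
;   HARD/SP: hard-float ABI, single-precision-only FPU (cortex-m4); float
;            operations use VFP instructions, double operations still call
;            the AEABI helpers.
;   HARD/DP: hard-float ABI, double-precision FPU (cortex-m7, cortex-a8).
; The VFP4-ALL/VFP4-DP/FP-ARMv8 prefixes distinguish FPU generations where
; the selected instructions differ (e.g. VSEL only exists in FP-ARMv8).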

define float @add_f(float %a, float %b) {
entry:
; CHECK-LABEL: add_f:
; NONE: bl __aeabi_fadd
; HARD: vadd.f32 s0, s0, s1
  %0 = fadd float %a, %b
  ret float %0
}

define double @add_d(double %a, double %b) {
entry:
; CHECK-LABEL: add_d:
; NONE: bl __aeabi_dadd
; SP: bl __aeabi_dadd
; DP: vadd.f64 d0, d0, d1
  %0 = fadd double %a, %b
  ret double %0
}

define float @sub_f(float %a, float %b) {
entry:
; CHECK-LABEL: sub_f:
; NONE: bl __aeabi_fsub
; HARD: vsub.f32 s
  %0 = fsub float %a, %b
  ret float %0
}

define double @sub_d(double %a, double %b) {
entry:
; CHECK-LABEL: sub_d:
; NONE: bl __aeabi_dsub
; SP: bl __aeabi_dsub
; DP: vsub.f64 d0, d0, d1
  %0 = fsub double %a, %b
  ret double %0
}

define float @mul_f(float %a, float %b) {
entry:
; CHECK-LABEL: mul_f:
; NONE: bl __aeabi_fmul
; HARD: vmul.f32 s
  %0 = fmul float %a, %b
  ret float %0
}

define double @mul_d(double %a, double %b) {
entry:
; CHECK-LABEL: mul_d:
; NONE: bl __aeabi_dmul
; SP: bl __aeabi_dmul
; DP: vmul.f64 d0, d0, d1
  %0 = fmul double %a, %b
  ret double %0
}

define float @div_f(float %a, float %b) {
entry:
; CHECK-LABEL: div_f:
; NONE: bl __aeabi_fdiv
; HARD: vdiv.f32 s
  %0 = fdiv float %a, %b
  ret float %0
}

define double @div_d(double %a, double %b) {
entry:
; CHECK-LABEL: div_d:
; NONE: bl __aeabi_ddiv
; SP: bl __aeabi_ddiv
; DP: vdiv.f64 d0, d0, d1
  %0 = fdiv double %a, %b
  ret double %0
}

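; frem has no AEABI helper, so it is lowered to a call to libm's fmodf/fmod.
; Under the hard-float ABI the arguments and return value are already in the
; VFP registers the callee expects, so the call can be emitted as a tail call
; (plain b rather than bl).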
define float @rem_f(float %a, float %b) {
entry:
; CHECK-LABEL: rem_f:
; NONE: bl fmodf
; HARD: b fmodf
  %0 = frem float %a, %b
  ret float %0
}

define double @rem_d(double %a, double %b) {
entry:
; CHECK-LABEL: rem_d:
; NONE: bl fmod
; HARD: b fmod
  %0 = frem double %a, %b
  ret double %0
}

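; Loads and stores: soft-float keeps FP values in core registers
; (ldr/ldm, str/strd), while hard-float accesses memory through the FP
; register file directly (vldr/vstr).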
define float @load_f(float* %a) {
entry:
; CHECK-LABEL: load_f:
; NONE: ldr r0, [r0]
; HARD: vldr s0, [r0]
  %0 = load float, float* %a, align 4
  ret float %0
}

define double @load_d(double* %a) {
entry:
; CHECK-LABEL: load_d:
; NONE: ldm r0, {r0, r1}
; HARD: vldr d0, [r0]
  %0 = load double, double* %a, align 8
  ret double %0
}

define void @store_f(float* %a, float %b) {
entry:
; CHECK-LABEL: store_f:
; NONE: str r1, [r0]
; HARD: vstr s0, [r0]
  store float %b, float* %a, align 4
  ret void
}

define void @store_d(double* %a, double %b) {
entry:
; CHECK-LABEL: store_d:
; NONE: strd r2, r3, [r0]
; HARD: vstr d0, [r0]
  store double %b, double* %a, align 8
  ret void
}

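; float<->double conversions: only a double-precision FPU has vcvt between
; f32 and f64, so single-precision-only targets must still call
; __aeabi_f2d/__aeabi_d2f.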
define double @f_to_d(float %a) {
; CHECK-LABEL: f_to_d:
; NONE: bl __aeabi_f2d
; SP: bl __aeabi_f2d
; DP: vcvt.f64.f32 d0, s0
  %1 = fpext float %a to double
  ret double %1
}

define float @d_to_f(double %a) {
; CHECK-LABEL: d_to_f:
; NONE: bl __aeabi_d2f
; SP: bl __aeabi_d2f
; DP: vcvt.f32.f64 s0, d0
  %1 = fptrunc double %a to float
  ret float %1
}

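; FP<->integer conversions: the hardware vcvt forms keep the integer in an
; s register, so a vmov transfers it to or from the core register the ABI
; uses for i32 values.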
define i32 @f_to_si(float %a) {
; CHECK-LABEL: f_to_si:
; NONE: bl __aeabi_f2iz
; HARD: vcvt.s32.f32 s0, s0
; HARD: vmov r0, s0
  %1 = fptosi float %a to i32
  ret i32 %1
}

define i32 @d_to_si(double %a) {
; CHECK-LABEL: d_to_si:
; NONE: bl __aeabi_d2iz
; SP: vmov r0, r1, d0
; SP: bl __aeabi_d2iz
; DP: vcvt.s32.f64 s0, d0
; DP: vmov r0, s0
  %1 = fptosi double %a to i32
  ret i32 %1
}

define i32 @f_to_ui(float %a) {
; CHECK-LABEL: f_to_ui:
; NONE: bl __aeabi_f2uiz
; HARD: vcvt.u32.f32 s0, s0
; HARD: vmov r0, s0
  %1 = fptoui float %a to i32
  ret i32 %1
}

define i32 @d_to_ui(double %a) {
; CHECK-LABEL: d_to_ui:
; NONE: bl __aeabi_d2uiz
; SP: vmov r0, r1, d0
; SP: bl __aeabi_d2uiz
; DP: vcvt.u32.f64 s0, d0
; DP: vmov r0, s0
  %1 = fptoui double %a to i32
  ret i32 %1
}

define float @si_to_f(i32 %a) {
; CHECK-LABEL: si_to_f:
; NONE: bl __aeabi_i2f
; HARD: vcvt.f32.s32 s0, s0
  %1 = sitofp i32 %a to float
  ret float %1
}

define double @si_to_d(i32 %a) {
; CHECK-LABEL: si_to_d:
; NONE: bl __aeabi_i2d
; SP: bl __aeabi_i2d
; DP: vcvt.f64.s32 d0, s0
  %1 = sitofp i32 %a to double
  ret double %1
}

define float @ui_to_f(i32 %a) {
; CHECK-LABEL: ui_to_f:
; NONE: bl __aeabi_ui2f
; HARD: vcvt.f32.u32 s0, s0
  %1 = uitofp i32 %a to float
  ret float %1
}

define double @ui_to_d(i32 %a) {
; CHECK-LABEL: ui_to_d:
; NONE: bl __aeabi_ui2d
; SP: bl __aeabi_ui2d
; DP: vcvt.f64.u32 d0, s0
  %1 = uitofp i32 %a to double
  ret double %1
}

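; Bitcasts: under the soft-float ABI the bits are already in the right core
; registers, so no instruction is needed at all; under the hard-float ABI a
; vmov moves the bits between the core and FP register files.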
define float @bitcast_i_to_f(i32 %a) {
; CHECK-LABEL: bitcast_i_to_f:
; NONE-NOT: mov
; HARD: vmov s0, r0
  %1 = bitcast i32 %a to float
  ret float %1
}

define double @bitcast_i_to_d(i64 %a) {
; CHECK-LABEL: bitcast_i_to_d:
; NONE-NOT: mov
; HARD: vmov d0, r0, r1
  %1 = bitcast i64 %a to double
  ret double %1
}

define i32 @bitcast_f_to_i(float %a) {
; CHECK-LABEL: bitcast_f_to_i:
; NONE-NOT: mov
; HARD: vmov r0, s0
  %1 = bitcast float %a to i32
  ret i32 %1
}

define i64 @bitcast_d_to_i(double %a) {
; CHECK-LABEL: bitcast_d_to_i:
; NONE-NOT: mov
; HARD: vmov r0, r1, d0
  %1 = bitcast double %a to i64
  ret i64 %1
}

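; Selects: the i1 condition is tested with lsls/ands and the result chosen
; with predicated moves. FP-ARMv8 targets can instead use the branchless
; VSEL instructions; single-precision-only targets have to select doubles
; through core registers.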
define float @select_f(float %a, float %b, i1 %c) {
; CHECK-LABEL: select_f:
; NONE: lsls r2, r2, #31
; NONE: moveq r0, r1
; HARD: lsls r0, r0, #31
; VFP4-ALL: vmovne.f32 s1, s0
; VFP4-ALL: vmov.f32 s0, s1
; FP-ARMv8: vseleq.f32 s0, s1, s0
  %1 = select i1 %c, float %a, float %b
  ret float %1
}

define double @select_d(double %a, double %b, i1 %c) {
; CHECK-LABEL: select_d:
; NONE: ldr{{(.w)?}} [[REG:r[0-9]+]], [sp]
; NONE: ands [[REG]], [[REG]], #1
; NONE: moveq r0, r2
; NONE: moveq r1, r3
; SP: ands r0, r0, #1
; SP-DAG: vmov [[ALO:r[0-9]+]], [[AHI:r[0-9]+]], d0
; SP-DAG: vmov [[BLO:r[0-9]+]], [[BHI:r[0-9]+]], d1
; SP: itt ne
; SP-DAG: movne [[BLO]], [[ALO]]
; SP-DAG: movne [[BHI]], [[AHI]]
; SP: vmov d0, [[BLO]], [[BHI]]
; DP: lsls r0, r0, #31
; VFP4-DP: vmovne.f64 d1, d0
; VFP4-DP: vmov.f64 d0, d1
; FP-ARMv8: vseleq.f64 d0, d1, d0
  %1 = select i1 %c, double %a, double %b
  ret double %1
}