; RUN: llc < %s -verify-machineinstrs -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -verify-machineinstrs -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -verify-machineinstrs -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB

; Test sitofp

define void @sitofp_single_i32(i32 %a, float %b) nounwind ssp {
entry:
; ARM: sitofp_single_i32
; ARM: vmov s0, r0
; ARM: vcvt.f32.s32 s0, s0
; THUMB: sitofp_single_i32
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.s32 s0, s0
  %b.addr = alloca float, align 4
  %conv = sitofp i32 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @sitofp_single_i16(i16 %a, float %b) nounwind ssp {
entry:
; ARM: sitofp_single_i16
; ARM: sxth r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f32.s32 s0, s0
; THUMB: sitofp_single_i16
; THUMB: sxth r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.s32 s0, s0
  %b.addr = alloca float, align 4
  %conv = sitofp i16 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @sitofp_single_i8(i8 %a) nounwind ssp {
entry:
; ARM: sitofp_single_i8
; ARM: sxtb r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f32.s32 s0, s0
; THUMB: sitofp_single_i8
; THUMB: sxtb r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.s32 s0, s0
  %b.addr = alloca float, align 4
  %conv = sitofp i8 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @sitofp_double_i32(i32 %a, double %b) nounwind ssp {
entry:
; ARM: sitofp_double_i32
; ARM: vmov s0, r0
; ARM: vcvt.f64.s32 d16, s0
; THUMB: sitofp_double_i32
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.s32 d16, s0
  %b.addr = alloca double, align 8
  %conv = sitofp i32 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

define void @sitofp_double_i16(i16 %a, double %b) nounwind ssp {
entry:
; ARM: sitofp_double_i16
; ARM: sxth r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f64.s32 d16, s0
; THUMB: sitofp_double_i16
; THUMB: sxth r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.s32 d16, s0
  %b.addr = alloca double, align 8
  %conv = sitofp i16 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

define void @sitofp_double_i8(i8 %a, double %b) nounwind ssp {
entry:
; ARM: sitofp_double_i8
; ARM: sxtb r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f64.s32 d16, s0
; THUMB: sitofp_double_i8
; THUMB: sxtb r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.s32 d16, s0
  %b.addr = alloca double, align 8
  %conv = sitofp i8 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

; Test uitofp

define void @uitofp_single_i32(i32 %a, float %b) nounwind ssp {
entry:
; ARM: uitofp_single_i32
; ARM: vmov s0, r0
; ARM: vcvt.f32.u32 s0, s0
; THUMB: uitofp_single_i32
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.u32 s0, s0
  %b.addr = alloca float, align 4
  %conv = uitofp i32 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @uitofp_single_i16(i16 %a, float %b) nounwind ssp {
entry:
; ARM: uitofp_single_i16
; ARM: uxth r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f32.u32 s0, s0
; THUMB: uitofp_single_i16
; THUMB: uxth r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.u32 s0, s0
  %b.addr = alloca float, align 4
  %conv = uitofp i16 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @uitofp_single_i8(i8 %a) nounwind ssp {
entry:
; ARM: uitofp_single_i8
; ARM: and r0, r0, #255
; ARM: vmov s0, r0
; ARM: vcvt.f32.u32 s0, s0
; THUMB: uitofp_single_i8
; THUMB: and r0, r0, #255
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.u32 s0, s0
  %b.addr = alloca float, align 4
  %conv = uitofp i8 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @uitofp_double_i32(i32 %a, double %b) nounwind ssp {
entry:
; ARM: uitofp_double_i32
; ARM: vmov s0, r0
; ARM: vcvt.f64.u32 d16, s0
; THUMB: uitofp_double_i32
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.u32 d16, s0
  %b.addr = alloca double, align 8
  %conv = uitofp i32 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

define void @uitofp_double_i16(i16 %a, double %b) nounwind ssp {
entry:
; ARM: uitofp_double_i16
; ARM: uxth r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f64.u32 d16, s0
; THUMB: uitofp_double_i16
; THUMB: uxth r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.u32 d16, s0
  %b.addr = alloca double, align 8
  %conv = uitofp i16 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

define void @uitofp_double_i8(i8 %a, double %b) nounwind ssp {
entry:
; ARM: uitofp_double_i8
; ARM: and r0, r0, #255
; ARM: vmov s0, r0
; ARM: vcvt.f64.u32 d16, s0
; THUMB: uitofp_double_i8
; THUMB: and r0, r0, #255
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.u32 d16, s0
  %b.addr = alloca double, align 8
  %conv = uitofp i8 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

; Test fptosi

define void @fptosi_float(float %a) nounwind ssp {
entry:
; ARM: fptosi_float
; ARM: vcvt.s32.f32 s0, s0
; THUMB: fptosi_float
; THUMB: vcvt.s32.f32 s0, s0
  %b.addr = alloca i32, align 4
  %conv = fptosi float %a to i32
  store i32 %conv, i32* %b.addr, align 4
  ret void
}

define void @fptosi_double(double %a) nounwind ssp {
entry:
; ARM: fptosi_double
; ARM: vcvt.s32.f64 s0, d16
; THUMB: fptosi_double
; THUMB: vcvt.s32.f64 s0, d16
  %b.addr = alloca i32, align 8
  %conv = fptosi double %a to i32
  store i32 %conv, i32* %b.addr, align 8
  ret void
}

; Test fptoui

define void @fptoui_float(float %a) nounwind ssp {
entry:
; ARM: fptoui_float
; ARM: vcvt.u32.f32 s0, s0
; THUMB: fptoui_float
; THUMB: vcvt.u32.f32 s0, s0
  %b.addr = alloca i32, align 4
  %conv = fptoui float %a to i32
  store i32 %conv, i32* %b.addr, align 4
  ret void
}

define void @fptoui_double(double %a) nounwind ssp {
entry:
; ARM: fptoui_double
; ARM: vcvt.u32.f64 s0, d16
; THUMB: fptoui_double
; THUMB: vcvt.u32.f64 s0, d16
  %b.addr = alloca i32, align 8
  %conv = fptoui double %a to i32
  store i32 %conv, i32* %b.addr, align 8
  ret void
}