; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB

; Test sitofp
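;
; Expected fast-isel lowering (per the CHECK lines below): the integer is
; moved into an s-register with vmov and converted with vcvt.f32.s32, or
; with vcvt.f64.s32 into d16 when the result is a double. Sub-word i16/i8
; inputs are sign-extended to 32 bits first (sxth/sxtb), since the VFP
; conversion consumes a full 32-bit operand.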

define void @sitofp_single_i32(i32 %a, float %b) nounwind ssp {
entry:
; ARM: sitofp_single_i32
; ARM: vmov s0, r0
; ARM: vcvt.f32.s32 s0, s0
; THUMB: sitofp_single_i32
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.s32 s0, s0
  %b.addr = alloca float, align 4
  %conv = sitofp i32 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @sitofp_single_i16(i16 %a, float %b) nounwind ssp {
entry:
; ARM: sitofp_single_i16
; ARM: sxth r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f32.s32 s0, s0
; THUMB: sitofp_single_i16
; THUMB: sxth r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.s32 s0, s0
  %b.addr = alloca float, align 4
  %conv = sitofp i16 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @sitofp_single_i8(i8 %a) nounwind ssp {
entry:
; ARM: sitofp_single_i8
; ARM: sxtb r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f32.s32 s0, s0
; THUMB: sitofp_single_i8
; THUMB: sxtb r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.s32 s0, s0
  %b.addr = alloca float, align 4
  %conv = sitofp i8 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @sitofp_double_i32(i32 %a, double %b) nounwind ssp {
entry:
; ARM: sitofp_double_i32
; ARM: vmov s0, r0
; ARM: vcvt.f64.s32 d16, s0
; THUMB: sitofp_double_i32
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.s32 d16, s0
  %b.addr = alloca double, align 8
  %conv = sitofp i32 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

define void @sitofp_double_i16(i16 %a, double %b) nounwind ssp {
entry:
; ARM: sitofp_double_i16
; ARM: sxth r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f64.s32 d16, s0
; THUMB: sitofp_double_i16
; THUMB: sxth r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.s32 d16, s0
  %b.addr = alloca double, align 8
  %conv = sitofp i16 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

define void @sitofp_double_i8(i8 %a, double %b) nounwind ssp {
entry:
; ARM: sitofp_double_i8
; ARM: sxtb r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f64.s32 d16, s0
; THUMB: sitofp_double_i8
; THUMB: sxtb r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.s32 d16, s0
  %b.addr = alloca double, align 8
  %conv = sitofp i8 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

; Test uitofp
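;
; Same structure as the signed tests above, except that i16/i8 inputs are
; zero-extended with uxth/uxtb and the unsigned conversions
; vcvt.f32.u32 / vcvt.f64.u32 are used.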

define void @uitofp_single_i32(i32 %a, float %b) nounwind ssp {
entry:
; ARM: uitofp_single_i32
; ARM: vmov s0, r0
; ARM: vcvt.f32.u32 s0, s0
; THUMB: uitofp_single_i32
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.u32 s0, s0
  %b.addr = alloca float, align 4
  %conv = uitofp i32 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @uitofp_single_i16(i16 %a, float %b) nounwind ssp {
entry:
; ARM: uitofp_single_i16
; ARM: uxth r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f32.u32 s0, s0
; THUMB: uitofp_single_i16
; THUMB: uxth r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.u32 s0, s0
  %b.addr = alloca float, align 4
  %conv = uitofp i16 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @uitofp_single_i8(i8 %a) nounwind ssp {
entry:
; ARM: uitofp_single_i8
; ARM: uxtb r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f32.u32 s0, s0
; THUMB: uitofp_single_i8
; THUMB: uxtb r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.u32 s0, s0
  %b.addr = alloca float, align 4
  %conv = uitofp i8 %a to float
  store float %conv, float* %b.addr, align 4
  ret void
}

define void @uitofp_double_i32(i32 %a, double %b) nounwind ssp {
entry:
; ARM: uitofp_double_i32
; ARM: vmov s0, r0
; ARM: vcvt.f64.u32 d16, s0
; THUMB: uitofp_double_i32
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.u32 d16, s0
  %b.addr = alloca double, align 8
  %conv = uitofp i32 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

define void @uitofp_double_i16(i16 %a, double %b) nounwind ssp {
entry:
; ARM: uitofp_double_i16
; ARM: uxth r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f64.u32 d16, s0
; THUMB: uitofp_double_i16
; THUMB: uxth r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.u32 d16, s0
  %b.addr = alloca double, align 8
  %conv = uitofp i16 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}

define void @uitofp_double_i8(i8 %a, double %b) nounwind ssp {
entry:
; ARM: uitofp_double_i8
; ARM: uxtb r0, r0
; ARM: vmov s0, r0
; ARM: vcvt.f64.u32 d16, s0
; THUMB: uitofp_double_i8
; THUMB: uxtb r0, r0
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.u32 d16, s0
  %b.addr = alloca double, align 8
  %conv = uitofp i8 %a to double
  store double %conv, double* %b.addr, align 8
  ret void
}