; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s

; PR12540: ARM backend lowering of FP_ROUND v2f64 to v2f32.
define <2 x float> @vtrunc(<2 x double> %a) {
; CHECK: vcvt.f32.f64 [[S0:s[0-9]+]], [[D0:d[0-9]+]]
; CHECK: vcvt.f32.f64 [[S1:s[0-9]+]], [[D1:d[0-9]+]]
  %vt = fptrunc <2 x double> %a to <2 x float>
  ret <2 x float> %vt
}

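; fpext of <2 x float> to <2 x double> should be lowered to two scalar
; vcvt.f64.f32 conversions, as the CHECK lines below verify.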
define <2 x double> @vextend(<2 x float> %a) {
; CHECK: vcvt.f64.f32 [[D0:d[0-9]+]], [[S0:s[0-9]+]]
; CHECK: vcvt.f64.f32 [[D1:d[0-9]+]], [[S1:s[0-9]+]]
  %ve = fpext <2 x float> %a to <2 x double>
  ret <2 x double> %ve
}

; We used to generate vmovs between scalar and vfp/neon registers.
; CHECK: vsitofp_double
define void @vsitofp_double(<2 x i32>* %loadaddr,
                            <2 x double>* %storeaddr) {
  %v0 = load <2 x i32>, <2 x i32>* %loadaddr
; CHECK: vldr
; CHECK-NEXT: vcvt.f64.s32
; CHECK-NEXT: vcvt.f64.s32
; CHECK-NEXT: vst
  %r = sitofp <2 x i32> %v0 to <2 x double>
  store <2 x double> %r, <2 x double>* %storeaddr
  ret void
}
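; Same as above, but for unsigned i32 (vcvt.f64.u32).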
; CHECK: vuitofp_double
define void @vuitofp_double(<2 x i32>* %loadaddr,
                            <2 x double>* %storeaddr) {
  %v0 = load <2 x i32>, <2 x i32>* %loadaddr
; CHECK: vldr
; CHECK-NEXT: vcvt.f64.u32
; CHECK-NEXT: vcvt.f64.u32
; CHECK-NEXT: vst
  %r = uitofp <2 x i32> %v0 to <2 x double>
  store <2 x double> %r, <2 x double>* %storeaddr
  ret void
}