; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s

; PR12540: ARM backend lowering of FP_ROUND v2f64 to v2f32.
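; NEON has no double-precision vector operations, so the v2f64 -> v2f32
; round is expected to be expanded into per-lane scalar VFP conversions
; (one vcvt.f32.f64 per element), which is what the CHECK lines below verify.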
define <2 x float> @vtrunc(<2 x double> %a) {
; CHECK: vcvt.f32.f64 [[S0:s[0-9]+]], [[D0:d[0-9]+]]
; CHECK: vcvt.f32.f64 [[S1:s[0-9]+]], [[D1:d[0-9]+]]
  %vt = fptrunc <2 x double> %a to <2 x float>
  ret <2 x float> %vt
}

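; The opposite direction: fpext of v2f32 to v2f64 should likewise be split
; into one scalar vcvt.f64.f32 per lane.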
define <2 x double> @vextend(<2 x float> %a) {
; CHECK: vcvt.f64.f32 [[D0:d[0-9]+]], [[S0:s[0-9]+]]
; CHECK: vcvt.f64.f32 [[D1:d[0-9]+]], [[S1:s[0-9]+]]
  %ve = fpext <2 x float> %a to <2 x double>
  ret <2 x double> %ve
}

; We used to generate vmovs between scalar and VFP/NEON registers.
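; The int-to-fp tests below guard against that: the <2 x i32> vector should
; be loaded directly into a D register with vldr, converted lane by lane
; with vcvt.f64.s32 / vcvt.f64.u32, and stored, without bouncing values
; through core registers.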
; CHECK: vsitofp_double
define void @vsitofp_double(<2 x i32>* %loadaddr,
                            <2 x double>* %storeaddr) {
  %v0 = load <2 x i32>* %loadaddr
; CHECK:      vldr
; CHECK-NEXT: vcvt.f64.s32
; CHECK-NEXT: vcvt.f64.s32
; CHECK-NEXT: vst
  %r = sitofp <2 x i32> %v0 to <2 x double>
  store <2 x double> %r, <2 x double>* %storeaddr
  ret void
}
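; Same pattern for the unsigned conversion.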
; CHECK: vuitofp_double
define void @vuitofp_double(<2 x i32>* %loadaddr,
                            <2 x double>* %storeaddr) {
  %v0 = load <2 x i32>* %loadaddr
; CHECK:      vldr
; CHECK-NEXT: vcvt.f64.u32
; CHECK-NEXT: vcvt.f64.u32
; CHECK-NEXT: vst
  %r = uitofp <2 x i32> %v0 to <2 x double>
  store <2 x double> %r, <2 x double>* %storeaddr
  ret void
}