; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s

declare i32 @llvm.r600.read.tidig.x() nounwind readnone

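; uitofp of an i64 has no single SI instruction, so it is expanded from the
; 32-bit halves: result = ldexp(uitofp(hi), 32) + uitofp(lo). For example,
; 0x100000001 converts to 1.0 * 2^32 + 1.0 = 4294967297.0. The checks below
; expect exactly that v_cvt_f64_u32 / v_ldexp_f64 / v_add_f64 sequence.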
; SI-LABEL: {{^}}v_uint_to_fp_i64_to_f64
; SI: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; SI: v_cvt_f64_u32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
; SI: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
; SI: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
; SI: buffer_store_dwordx2 [[RESULT]]
define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
  %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %val = load i64, i64 addrspace(1)* %gep, align 8
  %result = uitofp i64 %val to double
  store double %result, double addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_i64_to_f64
define void @s_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
  %cast = uitofp i64 %in to double
  store double %cast, double addrspace(1)* %out, align 8
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f64
define void @s_uint_to_fp_v2i64_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
  %cast = uitofp <2 x i64> %in to <2 x double>
  store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_v4i64_to_v4f64
define void @s_uint_to_fp_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
  %cast = uitofp <4 x i64> %in to <4 x double>
  store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
  ret void
}

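; Unsigned 32-bit sources convert directly: one v_cvt_f64_u32 per element,
; with no ldexp/add expansion.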
; SI-LABEL: {{^}}s_uint_to_fp_i32_to_f64
; SI: v_cvt_f64_u32_e32
; SI: s_endpgm
define void @s_uint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
  %cast = uitofp i32 %in to double
  store double %cast, double addrspace(1)* %out, align 8
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_v2i32_to_v2f64
; SI: v_cvt_f64_u32_e32
; SI: v_cvt_f64_u32_e32
; SI: s_endpgm
define void @s_uint_to_fp_v2i32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i32> %in) {
  %cast = uitofp <2 x i32> %in to <2 x double>
  store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_v4i32_to_v4f64
; SI: v_cvt_f64_u32_e32
; SI: v_cvt_f64_u32_e32
; SI: v_cvt_f64_u32_e32
; SI: v_cvt_f64_u32_e32
; SI: s_endpgm
define void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i32> %in) {
  %cast = uitofp <4 x i32> %in to <4 x double>
  store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
  ret void
}

; FIXME: select on 0, 0
; SI-LABEL: {{^}}uint_to_fp_i1_to_f64:
; SI: v_cmp_eq_i32_e64 vcc
; We can't fold the SGPRs into v_cndmask_b32_e32, because it already
; uses an SGPR (implicit vcc).
; SI: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
; SI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 0, vcc
; SI: buffer_store_dwordx2
; SI: s_endpgm
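; The f64 result (0.0 or 1.0) is built one 32-bit half at a time; since the
; low word of both constants is zero, the second cndmask selects between
; 0 and 0, which is the redundant select noted in the FIXME above.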
define void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
  %cmp = icmp eq i32 %in, 0
  %fp = uitofp i1 %cmp to double
  store double %fp, double addrspace(1)* %out, align 4
  ret void
}

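; With an i1 operand the value is first materialized as an integer 0 or 1
; with v_cndmask_b32, then converted with a single v_cvt_f64_u32.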
; SI-LABEL: {{^}}uint_to_fp_i1_to_f64_load:
; SI: v_cndmask_b32_e64 [[IRESULT:v[0-9]]], 0, 1
; SI-NEXT: v_cvt_f64_u32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
; SI: buffer_store_dwordx2 [[RESULT]]
; SI: s_endpgm
define void @uint_to_fp_i1_to_f64_load(double addrspace(1)* %out, i1 %in) {
  %fp = uitofp i1 %in to double
  store double %fp, double addrspace(1)* %out, align 8
  ret void
}