; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s

declare i32 @llvm.r600.read.tidig.x() nounwind readnone

; SI-LABEL: {{^}}uint_to_fp_f64_i32
; SI: v_cvt_f64_u32_e32
; SI: s_endpgm
define void @uint_to_fp_f64_i32(double addrspace(1)* %out, i32 %in) {
  %cast = uitofp i32 %in to double
  store double %cast, double addrspace(1)* %out, align 8
  ret void
}

; SI-LABEL: {{^}}uint_to_fp_i1_f64:
; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
; FIXME: The VGPR sources for V_CNDMASK are copied from SGPRs;
; we should be able to fold the SGPRs into the V_CNDMASK instructions.
; SI: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
; SI: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CMP]]
; SI: buffer_store_dwordx2
; SI: s_endpgm
define void @uint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {
  %cmp = icmp eq i32 %in, 0
  %fp = uitofp i1 %cmp to double
  store double %fp, double addrspace(1)* %out, align 4
  ret void
}
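
; The two v_cndmask_b32 checks above come from materializing the 64-bit constant
; 1.0 or 0.0 one 32-bit half at a time. A hedged, illustrative sketch of the
; equivalent select form (not one of the checked tests; the function name is
; made up):
define void @uint_to_fp_i1_f64_select_sketch(double addrspace(1)* %out, i32 %in) {
  %cmp = icmp eq i32 %in, 0
  %fp = select i1 %cmp, double 1.0, double 0.0
  store double %fp, double addrspace(1)* %out, align 4
  ret void
}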

; SI-LABEL: {{^}}uint_to_fp_i1_f64_load:
; SI: v_cndmask_b32_e64 [[IRESULT:v[0-9]]], 0, 1
; SI-NEXT: v_cvt_f64_u32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
; SI: buffer_store_dwordx2 [[RESULT]]
; SI: s_endpgm
define void @uint_to_fp_i1_f64_load(double addrspace(1)* %out, i1 %in) {
  %fp = uitofp i1 %in to double
  store double %fp, double addrspace(1)* %out, align 8
  ret void
}

; SI-LABEL: {{^}}v_uint_to_fp_i64_to_f64
; SI: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; SI-DAG: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
; SI-DAG: v_cvt_f64_u32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
; SI: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
; SI: buffer_store_dwordx2 [[RESULT]]
define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
  %gep = getelementptr i64 addrspace(1)* %in, i32 %tid
  %val = load i64 addrspace(1)* %gep, align 8
  %result = uitofp i64 %val to double
  store double %result, double addrspace(1)* %out
  ret void
}
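
; The checks above reflect the i64 -> f64 expansion: each 32-bit half is
; converted with v_cvt_f64_u32, the high half is scaled by 2^32 with
; v_ldexp_f64, and the two doubles are added. A hedged, illustrative IR sketch
; of that expansion (not one of the checked tests; the function name is made up):
define void @uint_to_fp_i64_to_f64_expansion_sketch(double addrspace(1)* %out, i64 %in) {
  %lo = trunc i64 %in to i32
  %hi.shift = lshr i64 %in, 32
  %hi = trunc i64 %hi.shift to i32
  %lo.fp = uitofp i32 %lo to double
  %hi.fp = uitofp i32 %hi to double
  ; 0x41F0000000000000 is 2^32 as a double; the multiply stands in for ldexp(hi, 32).
  %hi.scaled = fmul double %hi.fp, 0x41F0000000000000
  %result = fadd double %hi.scaled, %lo.fp
  store double %result, double addrspace(1)* %out, align 8
  ret void
}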

; SI-LABEL: {{^}}s_uint_to_fp_f64_i64
define void @s_uint_to_fp_f64_i64(double addrspace(1)* %out, i64 %in) {
  %cast = uitofp i64 %in to double
  store double %cast, double addrspace(1)* %out, align 8
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_v2f64_v2i64
define void @s_uint_to_fp_v2f64_v2i64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
  %cast = uitofp <2 x i64> %in to <2 x double>
  store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
  ret void
}
69
Matt Arsenaultc9961752014-10-03 23:54:56 +000070; SI-LABEL: {{^}}s_uint_to_fp_v4f64_v4i64
Matt Arsenaultf7c95e32014-10-03 23:54:41 +000071define void @s_uint_to_fp_v4f64_v4i64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
72 %cast = uitofp <4 x i64> %in to <4 x double>
73 store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
74 ret void
75}