; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone

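; There is no single-instruction u64 -> f64 conversion on SI, so the checks
; below expect the lowering to convert each 32-bit half with v_cvt_f64_u32,
; scale the high half by 2^32 with v_ldexp_f64, and add the pieces:
;   f64(x) = ldexp(f64(hi32), 32) + f64(lo32)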
; SI-LABEL: {{^}}v_uint_to_fp_i64_to_f64
; SI: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; SI-DAG: v_cvt_f64_u32_e32 [[HI_CONV:v\[[0-9]+:[0-9]+\]]], v[[HI]]
; SI-DAG: v_cvt_f64_u32_e32 [[LO_CONV:v\[[0-9]+:[0-9]+\]]], v[[LO]]
; SI-DAG: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
; SI: buffer_store_dwordx2 [[RESULT]]
define amdgpu_kernel void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %val = load i64, i64 addrspace(1)* %gep, align 8
  %result = uitofp i64 %val to double
  store double %result, double addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_i64_to_f64
define amdgpu_kernel void @s_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
  %cast = uitofp i64 %in to double
  store double %cast, double addrspace(1)* %out, align 8
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f64
define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
  %cast = uitofp <2 x i64> %in to <2 x double>
  store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_v4i64_to_v4f64
define amdgpu_kernel void @s_uint_to_fp_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
  %cast = uitofp <4 x i64> %in to <4 x double>
  store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
  ret void
}

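; Any u32 value is exactly representable in an f64 (53-bit significand), so a
; single v_cvt_f64_u32 is expected per element in the i32 cases below.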
; SI-LABEL: {{^}}s_uint_to_fp_i32_to_f64
; SI: v_cvt_f64_u32_e32
; SI: s_endpgm
define amdgpu_kernel void @s_uint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
  %cast = uitofp i32 %in to double
  store double %cast, double addrspace(1)* %out, align 8
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_v2i32_to_v2f64
; SI: v_cvt_f64_u32_e32
; SI: v_cvt_f64_u32_e32
; SI: s_endpgm
define amdgpu_kernel void @s_uint_to_fp_v2i32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i32> %in) {
  %cast = uitofp <2 x i32> %in to <2 x double>
  store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
  ret void
}

; SI-LABEL: {{^}}s_uint_to_fp_v4i32_to_v4f64
; SI: v_cvt_f64_u32_e32
; SI: v_cvt_f64_u32_e32
; SI: v_cvt_f64_u32_e32
; SI: v_cvt_f64_u32_e32
; SI: s_endpgm
define amdgpu_kernel void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i32> %in) {
  %cast = uitofp <4 x i32> %in to <4 x double>
  store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
  ret void
}

; We can't fold the SGPRs into v_cndmask_b32_e32, because it already
; uses an SGPR (implicit vcc).
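; (On SI a VALU instruction may read at most one SGPR via the constant bus,
; and the implicit vcc read of the _e32 encoding already occupies that slot.)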

; SI-LABEL: {{^}}uint_to_fp_i1_to_f64:
; SI-DAG: v_cmp_eq_u32_e64 vcc
; SI-DAG: v_cndmask_b32_e32 v[[SEL:[0-9]+]], 0, v{{[0-9]+}}
; SI-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; SI: buffer_store_dwordx2 v{{\[}}[[ZERO]]:[[SEL]]{{\]}}
; SI: s_endpgm
define amdgpu_kernel void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
  %cmp = icmp eq i32 %in, 0
  %fp = uitofp i1 %cmp to double
  store double %fp, double addrspace(1)* %out, align 4
  ret void
}

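; With an i1 input the select and the conversion stay separate here:
; v_cndmask materializes the integer 0 or 1, then one v_cvt_f64_u32 widens it.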
; SI-LABEL: {{^}}uint_to_fp_i1_to_f64_load:
; SI: v_cndmask_b32_e64 [[IRESULT:v[0-9]]], 0, 1
; SI-NEXT: v_cvt_f64_u32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
; SI: buffer_store_dwordx2 [[RESULT]]
; SI: s_endpgm
define amdgpu_kernel void @uint_to_fp_i1_to_f64_load(double addrspace(1)* %out, i1 %in) {
  %fp = uitofp i1 %in to double
  store double %fp, double addrspace(1)* %out, align 8
  ret void
}