; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s

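; Note: buffer_load_ushort zero-extends, so the source value is below 2^16 and the
; signed and unsigned i32-to-f32 conversions agree; VI is expected to pick the
; signed form below, while SI keeps the unsigned one.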
; GCN-LABEL: {{^}}uitofp_i16_to_f16
; GCN: buffer_load_ushort v[[A_I16:[0-9]+]]
; SI: v_cvt_f32_u32_e32 v[[A_F32:[0-9]+]], v[[A_I16]]
; VI: v_cvt_f32_i32_e32 v[[A_F32:[0-9]+]], v[[A_I16]]
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @uitofp_i16_to_f16(
    half addrspace(1)* %r,
    i16 addrspace(1)* %a) {
entry:
  %a.val = load i16, i16 addrspace(1)* %a
  %r.val = uitofp i16 %a.val to half
  store half %r.val, half addrspace(1)* %r
  ret void
}

; GCN-LABEL: {{^}}uitofp_i32_to_f16
; GCN: buffer_load_dword v[[A_I32:[0-9]+]]
; GCN: v_cvt_f32_u32_e32 v[[A_F32:[0-9]+]], v[[A_I32]]
; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define amdgpu_kernel void @uitofp_i32_to_f16(
    half addrspace(1)* %r,
    i32 addrspace(1)* %a) {
entry:
  %a.val = load i32, i32 addrspace(1)* %a
  %r.val = uitofp i32 %a.val to half
  store half %r.val, half addrspace(1)* %r
  ret void
}

; f16 = uitofp i64 is in uint_to_fp.i64.ll

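; For the packed i16 vector, VI is expected to use SDWA forms of the conversions,
; which can read the low/high 16-bit halves of the loaded dword directly instead of
; extracting them with separate instructions.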
; GCN-LABEL: {{^}}uitofp_v2i16_to_v2f16
; GCN: buffer_load_dword

; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
; SI-DAG: v_lshlrev_b32_e32
; SI: v_or_b32_e32

; VI-DAG: v_cvt_f16_f32_e32
; VI-DAG: v_cvt_f32_i32_sdwa
; VI-DAG: v_cvt_f32_i32_sdwa
; VI-DAG: v_cvt_f16_f32_sdwa
; VI: v_or_b32_e32

; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @uitofp_v2i16_to_v2f16(
    <2 x half> addrspace(1)* %r,
    <2 x i16> addrspace(1)* %a) {
entry:
  %a.val = load <2 x i16>, <2 x i16> addrspace(1)* %a
  %r.val = uitofp <2 x i16> %a.val to <2 x half>
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}

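; For i32 sources both targets use the unsigned conversion. SI packs the two halves
; with an explicit shift and OR; the VI checks instead expect the second half from a
; v_cvt_f16_f32_sdwa (which can place its result directly in the high 16 bits), so no
; v_lshlrev is required before the OR.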
; GCN-LABEL: {{^}}uitofp_v2i32_to_v2f16
; GCN: buffer_load_dwordx2

; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f32_u32_e32
; SI: v_cvt_f16_f32_e32
; SI: v_cvt_f16_f32_e32
; SI-DAG: v_lshlrev_b32_e32
; SI: v_or_b32_e32

; VI-DAG: v_cvt_f32_u32_e32
; VI-DAG: v_cvt_f32_u32_e32
; VI-DAG: v_cvt_f16_f32_e32
; VI-DAG: v_cvt_f16_f32_sdwa
; VI: v_or_b32_e32

; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @uitofp_v2i32_to_v2f16(
    <2 x half> addrspace(1)* %r,
    <2 x i32> addrspace(1)* %a) {
entry:
  %a.val = load <2 x i32>, <2 x i32> addrspace(1)* %a
  %r.val = uitofp <2 x i32> %a.val to <2 x half>
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}

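; The i1 XOR result is expected to be materialized as 0.0/1.0 with v_cndmask_b32 and
; then converted to f16, rather than being converted directly from the condition bit.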
; GCN-LABEL: {{^}}s_uint_to_fp_i1_to_f16:
; GCN-DAG: v_cmp_le_f32_e32 [[CMP0:vcc]], 1.0, {{v[0-9]+}}
; GCN-DAG: v_cmp_le_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 0, {{v[0-9]+}}
; GCN: s_xor_b64 [[R_CMP:s\[[0-9]+:[0-9]+\]]], [[CMP1]], [[CMP0]]
; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[R_CMP]]
; GCN-NEXT: v_cvt_f16_f32_e32 [[R_F16:v[0-9]+]], [[RESULT]]
; GCN: buffer_store_short
; GCN: s_endpgm
define amdgpu_kernel void @s_uint_to_fp_i1_to_f16(half addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) {
  %a = load float, float addrspace(1)* %in0
  %b = load float, float addrspace(1)* %in1
  %acmp = fcmp oge float %a, 0.000000e+00
  %bcmp = fcmp oge float %b, 1.000000e+00
  %result = xor i1 %acmp, %bcmp
  %fp = uitofp i1 %result to half
  store half %fp, half addrspace(1)* %out
  ret void
}

; f16 = uitofp i64 is in uint_to_fp.i64.ll