; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=kaveri -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI,CIVI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,CIVI,GFX89 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX89,GFX9 %s

; Negate a uniform (SGPR) half value.
; FIXME: Should be able to do scalar op
; GCN-LABEL: {{^}}s_fneg_f16:
define amdgpu_kernel void @s_fneg_f16(half addrspace(1)* %out, half %in) #0 {
  %neg = fsub half -0.0, %in
  store half %neg, half addrspace(1)* %out
  ret void
}

; FIXME: Should be able to use bit operations when illegal type as
; well.

; GCN-LABEL: {{^}}v_fneg_f16:
; GCN: {{flat|global}}_load_ushort [[VAL:v[0-9]+]],
; GCN: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x8000, [[VAL]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[XOR]]
; SI: buffer_store_short [[XOR]]
define amdgpu_kernel void @v_fneg_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  ; NOTE(review): both GEPs below are based on %in, so the result is stored
  ; back over the input and %out goes unused — confirm this is intentional.
  ; NOTE(review): the SI prefix is not enabled by any RUN line above, so the
  ; buffer_store_short check is currently inert.
  %ptr.in = getelementptr inbounds half, half addrspace(1)* %in, i32 %tid
  %ptr.out = getelementptr inbounds half, half addrspace(1)* %in, i32 %tid
  %loaded = load half, half addrspace(1)* %ptr.in, align 2
  %neg = fsub half -0.0, %loaded
  store half %neg, half addrspace(1)* %ptr.out
  ret void
}

; fneg of a bitcast from i16 should be free: a scalar XOR of the sign bit.
; GCN-LABEL: {{^}}s_fneg_free_f16:
; GCN: s_load_dword [[NEG_VALUE:s[0-9]+]],
; GCN: s_xor_b32 [[XOR:s[0-9]+]], [[NEG_VALUE]], 0x8000{{$}}
; GCN: v_mov_b32_e32 [[V_XOR:v[0-9]+]], [[XOR]]
; GCN: {{flat|global}}_store_short v{{\[[0-9]+:[0-9]+\]}}, [[V_XOR]]
define amdgpu_kernel void @s_fneg_free_f16(half addrspace(1)* %out, i16 %in) #0 {
  %as.half = bitcast i16 %in to half
  %neg = fsub half -0.0, %as.half
  store half %neg, half addrspace(1)* %out
  ret void
}

; The fneg should fold into the multiply as a source modifier instead of
; being emitted as a separate instruction.
; GCN-LABEL: {{^}}v_fneg_fold_f16:
; GCN: {{flat|global}}_load_ushort [[NEG_VALUE:v[0-9]+]]

; CI-DAG: v_cvt_f32_f16_e32 [[CVT_VAL:v[0-9]+]], [[NEG_VALUE]]
; CI-DAG: v_cvt_f32_f16_e64 [[NEG_CVT0:v[0-9]+]], -[[NEG_VALUE]]
; CI: v_mul_f32_e32 [[MUL:v[0-9]+]], [[NEG_CVT0]], [[CVT_VAL]]
; CI: v_cvt_f16_f32_e32 [[CVT1:v[0-9]+]], [[MUL]]
; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[CVT1]]

; VI-NOT: [[NEG_VALUE]]
; VI: v_mul_f16_e64 v{{[0-9]+}}, -[[NEG_VALUE]], [[NEG_VALUE]]
define amdgpu_kernel void @v_fneg_fold_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
  %loaded = load half, half addrspace(1)* %in
  %neg = fsub half -0.0, %loaded
  %product = fmul half %neg, %loaded
  store half %product, half addrspace(1)* %out
  ret void
}

; Uniform v2f16 negate: a single scalar XOR flipping both sign bits.
; GCN-LABEL: {{^}}s_fneg_v2f16:
; GCN: s_xor_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80008000
define amdgpu_kernel void @s_fneg_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %in) #0 {
  %neg = fsub <2 x half> <half -0.0, half -0.0>, %in
  store <2 x half> %neg, <2 x half> addrspace(1)* %out
  ret void
}

; Same as s_fneg_v2f16, but the source is an SGPR produced by inline asm
; rather than a kernel argument load.
; GCN-LABEL: {{^}}s_fneg_v2f16_nonload:
; GCN: s_xor_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80008000
define amdgpu_kernel void @s_fneg_v2f16_nonload(<2 x half> addrspace(1)* %out) #0 {
  %sgpr = call i32 asm sideeffect "; def $0", "=s"()
  %as.v2f16 = bitcast i32 %sgpr to <2 x half>
  %neg = fsub <2 x half> <half -0.0, half -0.0>, %as.v2f16
  store <2 x half> %neg, <2 x half> addrspace(1)* %out
  ret void
}

; Divergent v2f16 negate: one vector XOR flipping both sign bits.
; GCN-LABEL: {{^}}v_fneg_v2f16:
; GCN: {{flat|global}}_load_dword [[VAL:v[0-9]+]]
; GCN: v_xor_b32_e32 v{{[0-9]+}}, 0x80008000, [[VAL]]
define amdgpu_kernel void @v_fneg_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  ; NOTE(review): both GEPs below are based on %in, so the result is stored
  ; back over the input and %out goes unused — confirm this is intentional.
  %ptr.in = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i32 %tid
  %ptr.out = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %in, i32 %tid
  %loaded = load <2 x half>, <2 x half> addrspace(1)* %ptr.in, align 2
  %neg = fsub <2 x half> <half -0.0, half -0.0>, %loaded
  store <2 x half> %neg, <2 x half> addrspace(1)* %ptr.out
  ret void
}

; fneg of a bitcast from i32 should be free: one scalar XOR of both sign bits.
; GCN-LABEL: {{^}}fneg_free_v2f16:
; GCN: s_load_dword [[VAL:s[0-9]+]]
; GCN: s_xor_b32 s{{[0-9]+}}, [[VAL]], 0x80008000
define amdgpu_kernel void @fneg_free_v2f16(<2 x half> addrspace(1)* %out, i32 %in) #0 {
  %as.v2f16 = bitcast i32 %in to <2 x half>
  %neg = fsub <2 x half> <half -0.0, half -0.0>, %as.v2f16
  store <2 x half> %neg, <2 x half> addrspace(1)* %out
  ret void
}

; The v2f16 fneg should fold into the multiply as source modifiers on
; targets with f16 arithmetic (VI uses SDWA + e64 forms, GFX9 packed math).
; GCN-LABEL: {{^}}v_fneg_fold_v2f16:
; GCN: {{flat|global}}_load_dword [[VAL:v[0-9]+]]

; CI: v_xor_b32_e32 [[FNEG:v[0-9]+]], 0x80008000, [[VAL]]
; CI: v_lshrrev_b32_e32
; CI: v_lshrrev_b32_e32

; CI: v_cvt_f32_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_cvt_f32_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_cvt_f16_f32
; CI: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_cvt_f16_f32

; VI: v_mul_f16_sdwa v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_mul_f16_e64 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}

; GFX9: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} neg_lo:[1,0] neg_hi:[1,0]{{$}}
define amdgpu_kernel void @v_fneg_fold_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
  %loaded = load <2 x half>, <2 x half> addrspace(1)* %in
  %neg = fsub <2 x half> <half -0.0, half -0.0>, %loaded
  %product = fmul <2 x half> %neg, %loaded
  store <2 x half> %product, <2 x half> addrspace(1)* %out
  ret void
}

; fneg of each extracted element should fold into its user as a source
; modifier (or constant fold: x * -4.0, 2.0 - x) instead of a real XOR.
; GCN-LABEL: {{^}}v_extract_fneg_fold_v2f16:
; GCN-DAG: {{flat|global}}_load_dword [[VAL:v[0-9]+]]
; CI-DAG: v_mul_f32_e32 v{{[0-9]+}}, -4.0, v{{[0-9]+}}
; CI-DAG: v_sub_f32_e32 v{{[0-9]+}}, 2.0, v{{[0-9]+}}

; GFX89-DAG: v_mul_f16_e32 v{{[0-9]+}}, -4.0, [[VAL]]
; GFX89-DAG: v_mov_b32_e32 [[CONST2:v[0-9]+]], 0x4000
; GFX89-DAG: v_sub_f16_sdwa v{{[0-9]+}}, [[CONST2]], [[VAL]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1

define amdgpu_kernel void @v_extract_fneg_fold_v2f16(<2 x half> addrspace(1)* %in) #0 {
  %vec = load <2 x half>, <2 x half> addrspace(1)* %in
  %neg = fsub <2 x half> <half -0.0, half -0.0>, %vec
  %lo = extractelement <2 x half> %neg, i32 0
  %hi = extractelement <2 x half> %neg, i32 1

  %mul.lo = fmul half %lo, 4.0
  %add.hi = fadd half %hi, 2.0
  store volatile half %mul.lo, half addrspace(1)* undef
  store volatile half %add.hi, half addrspace(1)* undef
  ret void
}

; With no arithmetic user to absorb the modifier, the fneg must stay a
; real XOR; GFX9 can store the high half directly with d16_hi.
; GCN-LABEL: {{^}}v_extract_fneg_no_fold_v2f16:
; GCN: {{flat|global}}_load_dword [[VAL:v[0-9]+]]
; GCN: v_xor_b32_e32 [[NEG:v[0-9]+]], 0x80008000, [[VAL]]
; CIVI: v_lshrrev_b32_e32 [[ELT1:v[0-9]+]], 16, [[NEG]]
; GFX9: global_store_short_d16_hi v{{\[[0-9]+:[0-9]+\]}}, [[NEG]], off
define amdgpu_kernel void @v_extract_fneg_no_fold_v2f16(<2 x half> addrspace(1)* %in) #0 {
  %vec = load <2 x half>, <2 x half> addrspace(1)* %in
  %neg = fsub <2 x half> <half -0.0, half -0.0>, %vec
  %lo = extractelement <2 x half> %neg, i32 0
  %hi = extractelement <2 x half> %neg, i32 1
  store volatile half %lo, half addrspace(1)* undef
  store volatile half %hi, half addrspace(1)* undef
  ret void
}

; Intrinsic declaration and attribute groups shared by the kernels above.
declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }