; RUN: llc -march=amdgcn -mcpu=kaveri -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s

; FIXME: Should be able to do scalar op
; FUNC-LABEL: {{^}}s_fneg_f16:

define void @s_fneg_f16(half addrspace(1)* %out, half %in) {
  %fneg = fsub half -0.000000e+00, %in
  store half %fneg, half addrspace(1)* %out
  ret void
}
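; fneg of an IEEE half only flips the sign bit (bit 15), so for the uniform
; kernel argument above the negation could in principle be a single scalar
; s_xor_b32 against 0x8000 instead of a VALU op, which is what the FIXME above
; is asking for; a small IR sketch of the bit-level identity follows
; v_fneg_f16 below.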

; FIXME: Should be able to use bit operations when the type is illegal as well.

; FUNC-LABEL: {{^}}v_fneg_f16:
; GCN: flat_load_ushort [[VAL:v[0-9]+]],

; CI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], [[VAL]]
; CI: v_cvt_f16_f32_e64 [[CVT1:v[0-9]+]], -[[CVT0]]
; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[CVT1]]

; VI: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x8000, [[VAL]]
; VI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[XOR]]
define void @v_fneg_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
  %val = load half, half addrspace(1)* %in, align 2
  %fneg = fsub half -0.000000e+00, %val
  store half %fneg, half addrspace(1)* %out
  ret void
}
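; The VI pattern above works because negating an IEEE half only flips bit 15
; of the raw encoding; a minimal sketch of that bit-level identity is below.
; The @fneg_f16_bits function is illustrative only and is not referenced by
; any CHECK line.
define i16 @fneg_f16_bits(i16 %bits) {
  ; -32768 is 0x8000, the f16 sign-bit mask that the v_xor_b32 check applies above.
  %neg = xor i16 %bits, -32768
  ret i16 %neg
}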

; FUNC-LABEL: {{^}}fneg_free_f16:
; GCN: flat_load_ushort [[NEG_VALUE:v[0-9]+]],

; XCI: s_xor_b32 [[XOR:s[0-9]+]], [[NEG_VALUE]], 0x8000{{$}}
; CI: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x8000, [[NEG_VALUE]]
; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[XOR]]
define void @fneg_free_f16(half addrspace(1)* %out, i16 %in) {
  %bc = bitcast i16 %in to half
  %fsub = fsub half -0.0, %bc
  store half %fsub, half addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_fneg_fold_f16:
; GCN: flat_load_ushort [[NEG_VALUE:v[0-9]+]]

; CI: v_cvt_f32_f16_e32 [[CVT0:v[0-9]+]], [[NEG_VALUE]]
; CI: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[CVT0]], [[CVT0]]
; CI: v_cvt_f16_f32_e32 [[CVT1:v[0-9]+]], [[MUL]]
; CI: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[CVT1]]

; VI-NOT: [[NEG_VALUE]]
; VI: v_mul_f16_e64 v{{[0-9]+}}, -[[NEG_VALUE]], [[NEG_VALUE]]
define void @v_fneg_fold_f16(half addrspace(1)* %out, half addrspace(1)* %in) {
  %val = load half, half addrspace(1)* %in
  %fsub = fsub half -0.0, %val
  %fmul = fmul half %fsub, %val
  store half %fmul, half addrspace(1)* %out
  ret void
}
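; In the folded case VI is expected not to materialize the negation at all:
; the v_mul_f16_e64 check encodes it as a source modifier (-[[NEG_VALUE]]),
; and the VI-NOT line checks that NEG_VALUE is not touched again (e.g. by a
; separate v_xor_b32) between the load and the multiply.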