; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s
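; A chain of two llvm.maxnum calls should be combined into a single v_max3
; instruction: v_max3_f32 for f32 on all targets, and v_max3_f16 for f16 on
; gfx9 (older targets lower f16 differently; see the f16 tests below).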
; GCN-LABEL: {{^}}test_fmax3_olt_0_f32:
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: v_max3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @test_fmax3_olt_0_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #0 {
  %a = load volatile float, float addrspace(1)* %aptr, align 4
  %b = load volatile float, float addrspace(1)* %bptr, align 4
  %c = load volatile float, float addrspace(1)* %cptr, align 4
  %f0 = call float @llvm.maxnum.f32(float %a, float %b)
  %f1 = call float @llvm.maxnum.f32(float %f0, float %c)
  store float %f1, float addrspace(1)* %out, align 4
  ret void
}

; Commute operand of second fmax
; GCN-LABEL: {{^}}test_fmax3_olt_1_f32:
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: v_max3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @test_fmax3_olt_1_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #0 {
  %a = load volatile float, float addrspace(1)* %aptr, align 4
  %b = load volatile float, float addrspace(1)* %bptr, align 4
  %c = load volatile float, float addrspace(1)* %cptr, align 4
  %f0 = call float @llvm.maxnum.f32(float %a, float %b)
  %f1 = call float @llvm.maxnum.f32(float %c, float %f0)
  store float %f1, float addrspace(1)* %out, align 4
  ret void
}
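; f16 lowering differs by target: SI has no f16 max, so the operation is
; promoted to f32 (v_max3_f32) and the result converted back with
; v_cvt_f16_f32; VI has v_max_f16 but no three-operand form, so two
; v_max_f16 instructions are expected; gfx9 folds the chain into v_max3_f16.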
; GCN-LABEL: {{^}}test_fmax3_olt_0_f16:
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]

; SI: v_max3_f32 [[RESULT_F32:v[0-9]+]],
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]

; VI: v_max_f16_e32
; VI: v_max_f16_e32 [[RESULT:v[0-9]+]],

; GFX9: v_max3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmax3_olt_0_f16(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #0 {
  %a = load volatile half, half addrspace(1)* %aptr, align 2
  %b = load volatile half, half addrspace(1)* %bptr, align 2
  %c = load volatile half, half addrspace(1)* %cptr, align 2
  %f0 = call half @llvm.maxnum.f16(half %a, half %b)
  %f1 = call half @llvm.maxnum.f16(half %f0, half %c)
  store half %f1, half addrspace(1)* %out, align 2
  ret void
}

; Commute operand of second fmax
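; As with the f32 case above, the combine should still fire (forming
; v_max3_f16 on gfx9) when %f0 is the second operand of the outer maxnum.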
; GCN-LABEL: {{^}}test_fmax3_olt_1_f16:
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]

; SI: v_max3_f32 [[RESULT_F32:v[0-9]+]],
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]

; VI: v_max_f16_e32
; VI: v_max_f16_e32 [[RESULT:v[0-9]+]],

; GFX9: v_max3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmax3_olt_1_f16(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #0 {
  %a = load volatile half, half addrspace(1)* %aptr, align 2
  %b = load volatile half, half addrspace(1)* %bptr, align 2
  %c = load volatile half, half addrspace(1)* %cptr, align 2
  %f0 = call half @llvm.maxnum.f16(half %a, half %b)
  %f1 = call half @llvm.maxnum.f16(half %c, half %f0)
  store half %f1, half addrspace(1)* %out, align 2
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare float @llvm.maxnum.f32(float, float) #1
declare half @llvm.maxnum.f16(half, half) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }