blob: 937bd74a0fe4f81aef6a81a47e9147da21a7df40 [file] [log] [blame]
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00004
; maxnum(maxnum(a, b), c) should fold into a single v_max3_f32.
; Loads are volatile, so the three buffer_load_dwords must all survive.
; GCN-LABEL: {{^}}test_fmax3_olt_0_f32:
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: v_max3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @test_fmax3_olt_0_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #0 {
  %a = load volatile float, float addrspace(1)* %aptr, align 4
  %b = load volatile float, float addrspace(1)* %bptr, align 4
  %c = load volatile float, float addrspace(1)* %cptr, align 4
  %f0 = call float @llvm.maxnum.f32(float %a, float %b)
  %f1 = call float @llvm.maxnum.f32(float %f0, float %c)
  store float %f1, float addrspace(1)* %out, align 4
  ret void
}
21
; Commute operand of second fmax: maxnum(c, maxnum(a, b)) must still form v_max3_f32.
; GCN-LABEL: {{^}}test_fmax3_olt_1_f32:
; GCN: buffer_load_dword [[REGB:v[0-9]+]]
; GCN: buffer_load_dword [[REGA:v[0-9]+]]
; GCN: buffer_load_dword [[REGC:v[0-9]+]]
; GCN: v_max3_f32 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_dword [[RESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @test_fmax3_olt_1_f32(float addrspace(1)* %out, float addrspace(1)* %aptr, float addrspace(1)* %bptr, float addrspace(1)* %cptr) #0 {
  %a = load volatile float, float addrspace(1)* %aptr, align 4
  %b = load volatile float, float addrspace(1)* %bptr, align 4
  %c = load volatile float, float addrspace(1)* %cptr, align 4
  %f0 = call float @llvm.maxnum.f32(float %a, float %b)
  %f1 = call float @llvm.maxnum.f32(float %c, float %f0)
  store float %f1, float addrspace(1)* %out, align 4
  ret void
}
Matt Arsenaultee324ff2017-05-17 19:25:06 +000039
; f16 variant: SI has no f16 max, so it promotes to f32 (max3 then convert back);
; VI lacks v_max3_f16 and uses two v_max_f16; GFX9 can use v_max3_f16 directly.
; NOTE(review): the SI cvt line previously read `[[RESULT:v[0-9]+]], [[RESULT]]`,
; a same-line self-reference to an otherwise-undefined variable; the source of the
; conversion is the v_max3_f32 result, so it must reference [[RESULT_F32]].
; GCN-LABEL: {{^}}test_fmax3_olt_0_f16:
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]

; SI: v_max3_f32 [[RESULT_F32:v[0-9]+]],
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]

; VI: v_max_f16_e32
; VI: v_max_f16_e32 [[RESULT:v[0-9]+]],

; GFX9: v_max3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmax3_olt_0_f16(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #0 {
  %a = load volatile half, half addrspace(1)* %aptr, align 2
  %b = load volatile half, half addrspace(1)* %bptr, align 2
  %c = load volatile half, half addrspace(1)* %cptr, align 2
  %f0 = call half @llvm.maxnum.f16(half %a, half %b)
  %f1 = call half @llvm.maxnum.f16(half %f0, half %c)
  store half %f1, half addrspace(1)* %out, align 2
  ret void
}
62
; Commute operand of second fmax (f16): maxnum(c, maxnum(a, b)).
; NOTE(review): as in the non-commuted f16 test, the SI cvt line self-referenced
; [[RESULT]] on its defining line; the conversion source is the v_max3_f32
; result, so it must reference [[RESULT_F32]].
; GCN-LABEL: {{^}}test_fmax3_olt_1_f16:
; GCN: buffer_load_ushort [[REGB:v[0-9]+]]
; GCN: buffer_load_ushort [[REGA:v[0-9]+]]
; GCN: buffer_load_ushort [[REGC:v[0-9]+]]

; SI: v_max3_f32 [[RESULT_F32:v[0-9]+]],
; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[RESULT_F32]]

; VI: v_max_f16_e32
; VI: v_max_f16_e32 [[RESULT:v[0-9]+]],

; GFX9: v_max3_f16 [[RESULT:v[0-9]+]], [[REGC]], [[REGB]], [[REGA]]
; GCN: buffer_store_short [[RESULT]],
define amdgpu_kernel void @test_fmax3_olt_1_f16(half addrspace(1)* %out, half addrspace(1)* %aptr, half addrspace(1)* %bptr, half addrspace(1)* %cptr) #0 {
  %a = load volatile half, half addrspace(1)* %aptr, align 2
  %b = load volatile half, half addrspace(1)* %bptr, align 2
  %c = load volatile half, half addrspace(1)* %cptr, align 2
  %f0 = call half @llvm.maxnum.f16(half %a, half %b)
  %f1 = call half @llvm.maxnum.f16(half %c, half %f0)
  store half %f1, half addrspace(1)* %out, align 2
  ret void
}
86
; Checks whether the test passes; performMinMaxCombine() should not optimize vector patterns of max3
; since there are no pack instructions for fmax3.
; GCN-LABEL: {{^}}no_fmax3_v2f16:

; SI: v_cvt_f16_f32_e32
; SI: v_max_f32_e32
; SI-NEXT: v_max_f32_e32
; SI-NEXT: v_max3_f32
; SI-NEXT: v_max3_f32

; VI: v_max_f16_e32
; VI-NEXT: v_max_f16_e32
; VI-NEXT: v_max_f16_e32
; VI-NEXT: v_max_f16_e32
; VI-NEXT: v_max_f16_e32
; VI-NEXT: v_max_f16_e32

; GFX9: v_pk_max_f16
; GFX9-NEXT: v_pk_max_f16
; GFX9-NEXT: v_pk_max_f16
define <2 x half> @no_fmax3_v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x half> %d) {
entry:
  %max = tail call fast <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b)
  %max1 = tail call fast <2 x half> @llvm.maxnum.v2f16(<2 x half> %c, <2 x half> %max)
  %res = tail call fast <2 x half> @llvm.maxnum.v2f16(<2 x half> %max1, <2 x half> %d)
  ret <2 x half> %res
}
114
; Intrinsic declarations and function attributes used by the kernels above.
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare float @llvm.maxnum.f32(float, float) #1
declare half @llvm.maxnum.f16(half, half) #1
declare <2 x half> @llvm.maxnum.v2f16(<2 x half>, <2 x half>)

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }