; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
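; SI has no f16 min instruction, so llvm.minnum.f16 is expanded through f32
; (v_cvt_f32_f16, v_min_f32, then v_cvt_f16_f32 back); VI selects v_min_f16 directly.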

declare half @llvm.minnum.f16(half %a, half %b)
declare <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b)

; GCN-LABEL: {{^}}minnum_f16
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define void @minnum_f16(
    half addrspace(1)* %r,
    half addrspace(1)* %a,
    half addrspace(1)* %b) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %b.val = load half, half addrspace(1)* %b
  %r.val = call half @llvm.minnum.f16(half %a.val, half %b.val)
  store half %r.val, half addrspace(1)* %r
  ret void
}

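; 0x4200 is half 3.0, which has no inline-constant encoding, so VI keeps it as a literal operand.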
; GCN-LABEL: {{^}}minnum_f16_imm_a
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], 0x4200, v[[B_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define void @minnum_f16_imm_a(
    half addrspace(1)* %r,
    half addrspace(1)* %b) {
entry:
  %b.val = load half, half addrspace(1)* %b
  %r.val = call half @llvm.minnum.f16(half 3.0, half %b.val)
  store half %r.val, half addrspace(1)* %r
  ret void
}

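; half 4.0 (0x4400) is representable as an inline constant, so VI encodes it directly instead of emitting a literal.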
; GCN-LABEL: {{^}}minnum_f16_imm_b
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4400{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], 4.0, v[[A_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define void @minnum_f16_imm_b(
    half addrspace(1)* %r,
    half addrspace(1)* %a) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %r.val = call half @llvm.minnum.f16(half %a.val, half 4.0)
  store half %r.val, half addrspace(1)* %r
  ret void
}

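; The packed <2 x half> case shifts out the high halves with v_lshrrev, operates on each
; element separately, and repacks the result with v_and/v_lshlrev/v_or.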
; GCN-LABEL: {{^}}minnum_v2f16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; VI: v_min_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
; VI: v_min_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define void @minnum_v2f16(
    <2 x half> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a,
    <2 x half> addrspace(1)* %b) {
entry:
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
  %r.val = call <2 x half> @llvm.minnum.v2f16(<2 x half> %a.val, <2 x half> %b.val)
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}

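; Packed constant <3.0, 4.0>: on VI the low element uses the 0x4200 literal and the high
; element the 4.0 inline constant.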
; GCN-LABEL: {{^}}minnum_v2f16_imm_a
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4400{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; VI: v_min_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
; VI: v_min_f16_e32 v[[R_F16_1:[0-9]+]], 4.0, v[[B_F16_1]]
; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define void @minnum_v2f16_imm_a(
    <2 x half> addrspace(1)* %r,
    <2 x half> addrspace(1)* %b) {
entry:
  %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
  %r.val = call <2 x half> @llvm.minnum.v2f16(<2 x half> <half 3.0, half 4.0>, <2 x half> %b.val)
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}

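; Packed constant <4.0, 3.0>: same as above with the two operand encodings swapped.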
; GCN-LABEL: {{^}}minnum_v2f16_imm_b
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4400{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x4200{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; VI: v_min_f16_e32 v[[R_F16_0:[0-9]+]], 4.0, v[[A_V2_F16]]
; VI: v_min_f16_e32 v[[R_F16_1:[0-9]+]], 0x4200, v[[A_F16_1]]
; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define void @minnum_v2f16_imm_b(
    <2 x half> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a) {
entry:
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %r.val = call <2 x half> @llvm.minnum.v2f16(<2 x half> %a.val, <2 x half> <half 4.0, half 3.0>)
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}