; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s

; Intrinsic declarations for the scalar and <2 x half> fused multiply-add
; forms exercised by the functions below.
declare half @llvm.fma.f16(half %a, half %b, half %c)
declare <2 x half> @llvm.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)

; Scalar f16 fma of three loaded values. The checks expect SI to promote to
; f32 (cvt up, v_fma_f32, cvt back down) and VI to select a native v_fma_f16.
; GCN-LABEL: {{^}}fma_f16
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32:[0-9]]], v[[B_F32:[0-9]]], v[[C_F32:[0-9]]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define void @fma_f16(
    half addrspace(1)* %r,
    half addrspace(1)* %a,
    half addrspace(1)* %b,
    half addrspace(1)* %c) {
  %a.val = load half, half addrspace(1)* %a
  %b.val = load half, half addrspace(1)* %b
  %c.val = load half, half addrspace(1)* %c
  %r.val = call half @llvm.fma.f16(half %a.val, half %b.val, half %c.val)
  store half %r.val, half addrspace(1)* %r
  ret void
}

; fma with an immediate first operand (3.0). Checks expect the constant to be
; materialized as f32 0x40400000 on SI and as f16 0x4200 on VI.
; GCN-LABEL: {{^}}fma_f16_imm_a
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]

; SI: v_mov_b32_e32 v[[A_F32:[0-9]+]], 0x40400000{{$}}
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32:[0-9]]], v[[B_F32:[0-9]]], v[[C_F32:[0-9]]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_mov_b32_e32 v[[A_F16:[0-9]+]], 0x4200{{$}}
; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define void @fma_f16_imm_a(
    half addrspace(1)* %r,
    half addrspace(1)* %b,
    half addrspace(1)* %c) {
  %b.val = load half, half addrspace(1)* %b
  %c.val = load half, half addrspace(1)* %c
  %r.val = call half @llvm.fma.f16(half 3.0, half %b.val, half %c.val)
  store half %r.val, half addrspace(1)* %r
  ret void
}

; fma with an immediate second operand (3.0). The VI check expects the
; immediate to be commuted into the first source position of v_fma_f16.
; GCN-LABEL: {{^}}fma_f16_imm_b
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
; SI: v_mov_b32_e32 v[[B_F32:[0-9]+]], 0x40400000{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32:[0-9]]], v[[B_F32:[0-9]]], v[[C_F32:[0-9]]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_mov_b32_e32 v[[B_F16:[0-9]+]], 0x4200{{$}}
; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define void @fma_f16_imm_b(
    half addrspace(1)* %r,
    half addrspace(1)* %a,
    half addrspace(1)* %c) {
  %a.val = load half, half addrspace(1)* %a
  %c.val = load half, half addrspace(1)* %c
  %r.val = call half @llvm.fma.f16(half %a.val, half 3.0, half %c.val)
  store half %r.val, half addrspace(1)* %r
  ret void
}

; fma with an immediate addend (3.0). The addend stays in the third source
; slot; the VI check allows the two multiplicands in either order (B, A here).
; GCN-LABEL: {{^}}fma_f16_imm_c
; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
; SI: v_mov_b32_e32 v[[C_F32:[0-9]+]], 0x40400000{{$}}
; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32:[0-9]]], v[[B_F32:[0-9]]], v[[C_F32:[0-9]]]
; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
; VI: v_mov_b32_e32 v[[C_F16:[0-9]+]], 0x4200{{$}}
; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
; GCN: buffer_store_short v[[R_F16]]
; GCN: s_endpgm
define void @fma_f16_imm_c(
    half addrspace(1)* %r,
    half addrspace(1)* %a,
    half addrspace(1)* %b) {
  %a.val = load half, half addrspace(1)* %a
  %b.val = load half, half addrspace(1)* %b
  %r.val = call half @llvm.fma.f16(half %a.val, half %b.val, half 3.0)
  store half %r.val, half addrspace(1)* %r
  ret void
}

; <2 x half> fma: both targets scalarize — lo half operated on in place, hi
; half extracted by a 16-bit shift — then the two f16 results are repacked
; with and/shl/or before the dword store.
; GCN-LABEL: {{^}}fma_v2f16
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]], v[[C_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]], v[[C_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]], v[[C_V2_F16]]
; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16_1]]
; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define void @fma_v2f16(
    <2 x half> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a,
    <2 x half> addrspace(1)* %b,
    <2 x half> addrspace(1)* %c) {
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
  %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
  %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> %a.val, <2 x half> %b.val, <2 x half> %c.val)
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}

; <2 x half> fma with a splat-immediate first operand <3.0, 3.0>. A single
; materialized constant register is expected to feed both scalarized fmas.
; GCN-LABEL: {{^}}fma_v2f16_imm_a
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
; SI: v_mov_b32_e32 v[[A_F32:[0-9]+]], 0x40400000{{$}}
; VI: v_mov_b32_e32 v[[A_F16:[0-9]+]], 0x4200{{$}}
; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32]], v[[C_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32]], v[[C_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_F16]], v[[C_V2_F16]]
; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16]], v[[C_F16_1]]
; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define void @fma_v2f16_imm_a(
    <2 x half> addrspace(1)* %r,
    <2 x half> addrspace(1)* %b,
    <2 x half> addrspace(1)* %c) {
  %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
  %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
  %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> <half 3.0, half 3.0>, <2 x half> %b.val, <2 x half> %c.val)
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}

; <2 x half> fma with a splat-immediate second operand <3.0, 3.0>; one
; materialized constant register is reused as the multiplier of both lanes.
; GCN-LABEL: {{^}}fma_v2f16_imm_b
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
; SI: v_mov_b32_e32 v[[B_F32:[0-9]+]], 0x40400000{{$}}
; VI: v_mov_b32_e32 v[[B_F16:[0-9]+]], 0x4200{{$}}
; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32]], v[[C_F32_0]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32]], v[[C_F32_1]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_F16]], v[[C_V2_F16]]
; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16]], v[[C_F16_1]]
; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define void @fma_v2f16_imm_b(
    <2 x half> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a,
    <2 x half> addrspace(1)* %c) {
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
  %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> %a.val, <2 x half> <half 3.0, half 3.0>, <2 x half> %c.val)
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}

; <2 x half> fma with a splat-immediate addend <3.0, 3.0>; one materialized
; constant register is reused as the addend of both scalarized lanes.
; GCN-LABEL: {{^}}fma_v2f16_imm_c
; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
; SI: v_mov_b32_e32 v[[C_F32:[0-9]+]], 0x40400000{{$}}
; VI: v_mov_b32_e32 v[[C_F16:[0-9]+]], 0x4200{{$}}
; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]], v[[C_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]], v[[C_F32]]
; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]], v[[C_F16]]
; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16]]
; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
; GCN: buffer_store_dword v[[R_V2_F16]]
; GCN: s_endpgm
define void @fma_v2f16_imm_c(
    <2 x half> addrspace(1)* %r,
    <2 x half> addrspace(1)* %a,
    <2 x half> addrspace(1)* %b) {
  %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
  %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
  %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> %a.val, <2 x half> %b.val, <2 x half> <half 3.0, half 3.0>)
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}