//===-- VOP1Instructions.td - Vector Instruction Definitions --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VOP1 Classes
//===----------------------------------------------------------------------===//

class VOP1e <bits<8> op, VOPProfile P> : Enc32 {
  bits<8> vdst;
  bits<9> src0;

  let Inst{8-0}   = !if(P.HasSrc0, src0{8-0}, 0);
  let Inst{16-9}  = op;
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{31-25} = 0x3f; //encoding
}
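
// Worked example (illustrative, not taken from this file): assuming the usual
// GCN convention that VGPR N appears in a 9-bit source field as 256 + N,
// v_mov_b32_e32 v0, v1 has op = 0x1, vdst = 0 and src0 = 0x101, so the word
// should be (0x3f << 25) | (0 << 17) | (0x1 << 9) | 0x101 = 0x7e000301.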

class VOP1_Pseudo <string opName, VOPProfile P, list<dag> pattern=[]> :
  InstSI <P.Outs32, P.Ins32, "", pattern>,
  VOP <opName>,
  SIMCInstr <opName#"_e32", SIEncodingFamily.NONE>,
  MnemonicAlias<opName#"_e32", opName> {

  let isPseudo = 1;
  let isCodeGenOnly = 1;
  let UseNamedOperandTable = 1;

  string Mnemonic = opName;
  string AsmOperands = P.Asm32;

  let Size = 4;
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let SubtargetPredicate = isGCN;

  let VOP1 = 1;
  let VALU = 1;
  let Uses = [EXEC];

  let AsmVariantName = AMDGPUAsmVariants.Default;

  VOPProfile Pfl = P;
}

class VOP1_Real <VOP1_Pseudo ps, int EncodingFamily> :
  InstSI <ps.OutOperandList, ps.InOperandList, ps.Mnemonic # ps.AsmOperands, []>,
  SIMCInstr <ps.PseudoInstr, EncodingFamily> {

  let isPseudo = 0;
  let isCodeGenOnly = 0;

  let Constraints = ps.Constraints;
  let DisableEncoding = ps.DisableEncoding;

  // copy relevant pseudo op flags
  let SubtargetPredicate = ps.SubtargetPredicate;
  let AsmMatchConverter  = ps.AsmMatchConverter;
  let AsmVariantName     = ps.AsmVariantName;
  let Constraints        = ps.Constraints;
  let DisableEncoding    = ps.DisableEncoding;
  let TSFlags            = ps.TSFlags;
}

class getVOP1Pat64 <SDPatternOperator node, VOPProfile P> : LetDummies {
  list<dag> ret = !if(P.HasModifiers,
      [(set P.DstVT:$vdst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
                                            i32:$src0_modifiers, i1:$clamp, i32:$omod))))],
      [(set P.DstVT:$vdst, (node P.Src0VT:$src0))]);
}

multiclass VOP1Inst <string opName, VOPProfile P,
                     SDPatternOperator node = null_frag> {
  def _e32 : VOP1_Pseudo <opName, P>;
  def _e64 : VOP3_Pseudo <opName, P, getVOP1Pat64<node, P>.ret>;
}
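
// Sketch of what the multiclass produces (illustrative): a definition such as
// defm V_MOV_B32 : VOP1Inst <"v_mov_b32", VOP_I32_I32> yields the codegen
// pseudos V_MOV_B32_e32 (VOP1) and V_MOV_B32_e64 (VOP3); the per-subtarget
// encodings are attached later by the VOP1_Real_si/_ci/_vi multiclasses below
// through the SIMCInstr mapping.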

//===----------------------------------------------------------------------===//
// VOP1 Instructions
//===----------------------------------------------------------------------===//

let VOPAsmPrefer32Bit = 1 in {
defm V_NOP : VOP1Inst <"v_nop", VOP_NONE>;
}

let isMoveImm = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in {
defm V_MOV_B32 : VOP1Inst <"v_mov_b32", VOP_I32_I32>;
} // End isMoveImm = 1

// FIXME: Specify SchedRW for READFIRSTLANE_B32
// TODO: Make profile for this, there is VOP3 encoding also
def V_READFIRSTLANE_B32 :
  InstSI <(outs SReg_32:$vdst),
    (ins VGPR_32:$src0),
    "v_readfirstlane_b32 $vdst, $src0",
    [(set i32:$vdst, (int_amdgcn_readfirstlane i32:$src0))]>,
  Enc32 {

  let isCodeGenOnly = 0;
  let UseNamedOperandTable = 1;

  let Size = 4;
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let SubtargetPredicate = isGCN;

  let VOP1 = 1;
  let VALU = 1;
  let Uses = [EXEC];
  let isConvergent = 1;

  bits<8> vdst;
  bits<9> src0;

  let Inst{8-0}   = src0;
  let Inst{16-9}  = 0x2;
  let Inst{24-17} = vdst;
  let Inst{31-25} = 0x3f; //encoding
}
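
// Background note (paraphrasing the GCN ISA documents, not this file): the
// hardware copies src0 from the first active lane of EXEC (lane 0 if EXEC is
// zero) into the scalar destination, so the result is wave-uniform; that is
// why the instruction writes an SGPR and is marked isConvergent.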

let SchedRW = [WriteQuarterRate32] in {
defm V_CVT_I32_F64 : VOP1Inst <"v_cvt_i32_f64", VOP_I32_F64, fp_to_sint>;
defm V_CVT_F64_I32 : VOP1Inst <"v_cvt_f64_i32", VOP_F64_I32, sint_to_fp>;
defm V_CVT_F32_I32 : VOP1Inst <"v_cvt_f32_i32", VOP_F32_I32, sint_to_fp>;
defm V_CVT_F32_U32 : VOP1Inst <"v_cvt_f32_u32", VOP_F32_I32, uint_to_fp>;
defm V_CVT_U32_F32 : VOP1Inst <"v_cvt_u32_f32", VOP_I32_F32, fp_to_uint>;
defm V_CVT_I32_F32 : VOP1Inst <"v_cvt_i32_f32", VOP_I32_F32, fp_to_sint>;
defm V_CVT_F16_F32 : VOP1Inst <"v_cvt_f16_f32", VOP_I32_F32, fp_to_f16>;
defm V_CVT_F32_F16 : VOP1Inst <"v_cvt_f32_f16", VOP_F32_I32, f16_to_fp>;
defm V_CVT_RPI_I32_F32 : VOP1Inst <"v_cvt_rpi_i32_f32", VOP_I32_F32, cvt_rpi_i32_f32>;
defm V_CVT_FLR_I32_F32 : VOP1Inst <"v_cvt_flr_i32_f32", VOP_I32_F32, cvt_flr_i32_f32>;
defm V_CVT_OFF_F32_I4 : VOP1Inst <"v_cvt_off_f32_i4", VOP_F32_I32>;
defm V_CVT_F32_F64 : VOP1Inst <"v_cvt_f32_f64", VOP_F32_F64, fpround>;
defm V_CVT_F64_F32 : VOP1Inst <"v_cvt_f64_f32", VOP_F64_F32, fpextend>;
defm V_CVT_F32_UBYTE0 : VOP1Inst <"v_cvt_f32_ubyte0", VOP_F32_I32, AMDGPUcvt_f32_ubyte0>;
defm V_CVT_F32_UBYTE1 : VOP1Inst <"v_cvt_f32_ubyte1", VOP_F32_I32, AMDGPUcvt_f32_ubyte1>;
defm V_CVT_F32_UBYTE2 : VOP1Inst <"v_cvt_f32_ubyte2", VOP_F32_I32, AMDGPUcvt_f32_ubyte2>;
defm V_CVT_F32_UBYTE3 : VOP1Inst <"v_cvt_f32_ubyte3", VOP_F32_I32, AMDGPUcvt_f32_ubyte3>;
defm V_CVT_U32_F64 : VOP1Inst <"v_cvt_u32_f64", VOP_I32_F64, fp_to_uint>;
defm V_CVT_F64_U32 : VOP1Inst <"v_cvt_f64_u32", VOP_F64_I32, uint_to_fp>;
} // End SchedRW = [WriteQuarterRate32]

defm V_FRACT_F32 : VOP1Inst <"v_fract_f32", VOP_F32_F32, AMDGPUfract>;
defm V_TRUNC_F32 : VOP1Inst <"v_trunc_f32", VOP_F32_F32, ftrunc>;
defm V_CEIL_F32 : VOP1Inst <"v_ceil_f32", VOP_F32_F32, fceil>;
defm V_RNDNE_F32 : VOP1Inst <"v_rndne_f32", VOP_F32_F32, frint>;
defm V_FLOOR_F32 : VOP1Inst <"v_floor_f32", VOP_F32_F32, ffloor>;
defm V_EXP_F32 : VOP1Inst <"v_exp_f32", VOP_F32_F32, fexp2>;

let SchedRW = [WriteQuarterRate32] in {
defm V_LOG_F32 : VOP1Inst <"v_log_f32", VOP_F32_F32, flog2>;
defm V_RCP_F32 : VOP1Inst <"v_rcp_f32", VOP_F32_F32, AMDGPUrcp>;
defm V_RCP_IFLAG_F32 : VOP1Inst <"v_rcp_iflag_f32", VOP_F32_F32>;
defm V_RSQ_F32 : VOP1Inst <"v_rsq_f32", VOP_F32_F32, AMDGPUrsq>;
} // End SchedRW = [WriteQuarterRate32]

let SchedRW = [WriteDouble] in {
defm V_RCP_F64 : VOP1Inst <"v_rcp_f64", VOP_F64_F64, AMDGPUrcp>;
defm V_RSQ_F64 : VOP1Inst <"v_rsq_f64", VOP_F64_F64, AMDGPUrsq>;
} // End SchedRW = [WriteDouble]

defm V_SQRT_F32 : VOP1Inst <"v_sqrt_f32", VOP_F32_F32, fsqrt>;

let SchedRW = [WriteDouble] in {
defm V_SQRT_F64 : VOP1Inst <"v_sqrt_f64", VOP_F64_F64, fsqrt>;
} // End SchedRW = [WriteDouble]

let SchedRW = [WriteQuarterRate32] in {
defm V_SIN_F32 : VOP1Inst <"v_sin_f32", VOP_F32_F32, AMDGPUsin>;
defm V_COS_F32 : VOP1Inst <"v_cos_f32", VOP_F32_F32, AMDGPUcos>;
} // End SchedRW = [WriteQuarterRate32]

defm V_NOT_B32 : VOP1Inst <"v_not_b32", VOP_I32_I32>;
defm V_BFREV_B32 : VOP1Inst <"v_bfrev_b32", VOP_I32_I32>;
defm V_FFBH_U32 : VOP1Inst <"v_ffbh_u32", VOP_I32_I32>;
defm V_FFBL_B32 : VOP1Inst <"v_ffbl_b32", VOP_I32_I32>;
defm V_FFBH_I32 : VOP1Inst <"v_ffbh_i32", VOP_I32_I32>;
defm V_FREXP_EXP_I32_F64 : VOP1Inst <"v_frexp_exp_i32_f64", VOP_I32_F64, int_amdgcn_frexp_exp>;

let SchedRW = [WriteDoubleAdd] in {
defm V_FREXP_MANT_F64 : VOP1Inst <"v_frexp_mant_f64", VOP_F64_F64, int_amdgcn_frexp_mant>;
defm V_FRACT_F64 : VOP1Inst <"v_fract_f64", VOP_F64_F64, AMDGPUfract>;
} // End SchedRW = [WriteDoubleAdd]

defm V_FREXP_EXP_I32_F32 : VOP1Inst <"v_frexp_exp_i32_f32", VOP_I32_F32, int_amdgcn_frexp_exp>;
defm V_FREXP_MANT_F32 : VOP1Inst <"v_frexp_mant_f32", VOP_F32_F32, int_amdgcn_frexp_mant>;

let VOPAsmPrefer32Bit = 1 in {
defm V_CLREXCP : VOP1Inst <"v_clrexcp", VOP_NO_EXT<VOP_NONE>>;
}

// Restrict src0 to be VGPR
def VOP_I32_VI32_NO_EXT : VOPProfile<[i32, i32, untyped, untyped]> {
  let Src0RC32 = VRegSrc_32;
  let Src0RC64 = VRegSrc_32;

  let HasExt = 0;
}

// Special case because there are no true output operands. Hack vdst
// to be a src operand. The custom inserter must add a tied implicit
// def and use of the super register since there seems to be no way to
// add an implicit def of a virtual register in tablegen.
def VOP_MOVRELD : VOPProfile<[untyped, i32, untyped, untyped]> {
  let Src0RC32 = VOPDstOperand<VGPR_32>;
  let Src0RC64 = VOPDstOperand<VGPR_32>;

  let Outs = (outs);
  let Ins32 = (ins Src0RC32:$vdst, VSrc_b32:$src0);
  let Ins64 = (ins Src0RC64:$vdst, VSrc_b32:$src0);
  let InsDPP = (ins Src0RC32:$vdst, Src0RC32:$src0, dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
                    bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
  let InsSDWA = (ins Src0RC32:$vdst, Int32InputMods:$src0_modifiers, VCSrc_b32:$src0,
                     clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
                     src0_sel:$src0_sel);

  let Asm32 = getAsm32<1, 1>.ret;
  let Asm64 = getAsm64<1, 1, 0>.ret;
  let AsmDPP = getAsmDPP<1, 1, 0>.ret;
  let AsmSDWA = getAsmSDWA<1, 1, 0>.ret;

  let HasExt = 0;
  let HasDst = 0;
  let EmitDst = 1; // force vdst emission
}
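
// Usage sketch (semantics as described in the GCN ISA documents): with
// M0 = 3, "v_movreld_b32 v0, v7" writes v7 into VGPR[0 + M0] = v3. The named
// $vdst only provides the base of the indexed write, and the register that is
// actually written depends on M0, which is why this profile hides the dst and
// models it as a source operand instead.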

let SubtargetPredicate = HasMovrel, Uses = [M0, EXEC] in {
// v_movreld_b32 is a special case because the destination output
// register is really a source. It isn't actually read (but may be
// written), and is only to provide the base register to start
// indexing from. Tablegen seems to not let you define an implicit
// virtual register output for the super register being written into,
// so this must have an implicit def of the register added to it.
defm V_MOVRELD_B32 : VOP1Inst <"v_movreld_b32", VOP_MOVRELD>;
defm V_MOVRELS_B32 : VOP1Inst <"v_movrels_b32", VOP_I32_VI32_NO_EXT>;
defm V_MOVRELSD_B32 : VOP1Inst <"v_movrelsd_b32", VOP_NO_EXT<VOP_I32_I32>>;
} // End Uses = [M0, EXEC]

// These instructions exist only on SI and CI
let SubtargetPredicate = isSICI in {

let SchedRW = [WriteQuarterRate32] in {
defm V_MOV_FED_B32 : VOP1Inst <"v_mov_fed_b32", VOP_I32_I32>;
defm V_LOG_CLAMP_F32 : VOP1Inst <"v_log_clamp_f32", VOP_F32_F32, int_amdgcn_log_clamp>;
defm V_RCP_CLAMP_F32 : VOP1Inst <"v_rcp_clamp_f32", VOP_F32_F32>;
defm V_RCP_LEGACY_F32 : VOP1Inst <"v_rcp_legacy_f32", VOP_F32_F32, AMDGPUrcp_legacy>;
defm V_RSQ_CLAMP_F32 : VOP1Inst <"v_rsq_clamp_f32", VOP_F32_F32, AMDGPUrsq_clamp>;
defm V_RSQ_LEGACY_F32 : VOP1Inst <"v_rsq_legacy_f32", VOP_F32_F32, AMDGPUrsq_legacy>;
} // End SchedRW = [WriteQuarterRate32]

let SchedRW = [WriteDouble] in {
defm V_RCP_CLAMP_F64 : VOP1Inst <"v_rcp_clamp_f64", VOP_F64_F64>;
defm V_RSQ_CLAMP_F64 : VOP1Inst <"v_rsq_clamp_f64", VOP_F64_F64, AMDGPUrsq_clamp>;
} // End SchedRW = [WriteDouble]

} // End SubtargetPredicate = isSICI


let SubtargetPredicate = isCIVI in {

let SchedRW = [WriteDoubleAdd] in {
defm V_TRUNC_F64 : VOP1Inst <"v_trunc_f64", VOP_F64_F64, ftrunc>;
defm V_CEIL_F64 : VOP1Inst <"v_ceil_f64", VOP_F64_F64, fceil>;
defm V_FLOOR_F64 : VOP1Inst <"v_floor_f64", VOP_F64_F64, ffloor>;
defm V_RNDNE_F64 : VOP1Inst <"v_rndne_f64", VOP_F64_F64, frint>;
} // End SchedRW = [WriteDoubleAdd]

let SchedRW = [WriteQuarterRate32] in {
defm V_LOG_LEGACY_F32 : VOP1Inst <"v_log_legacy_f32", VOP_F32_F32>;
defm V_EXP_LEGACY_F32 : VOP1Inst <"v_exp_legacy_f32", VOP_F32_F32>;
} // End SchedRW = [WriteQuarterRate32]

} // End SubtargetPredicate = isCIVI


let SubtargetPredicate = isVI in {

defm V_CVT_F16_U16 : VOP1Inst <"v_cvt_f16_u16", VOP_F16_I16, uint_to_fp>;
defm V_CVT_F16_I16 : VOP1Inst <"v_cvt_f16_i16", VOP_F16_I16, sint_to_fp>;
defm V_CVT_U16_F16 : VOP1Inst <"v_cvt_u16_f16", VOP_I16_F16, fp_to_uint>;
defm V_CVT_I16_F16 : VOP1Inst <"v_cvt_i16_f16", VOP_I16_F16, fp_to_sint>;
defm V_RCP_F16 : VOP1Inst <"v_rcp_f16", VOP_F16_F16, AMDGPUrcp>;
defm V_SQRT_F16 : VOP1Inst <"v_sqrt_f16", VOP_F16_F16, fsqrt>;
defm V_RSQ_F16 : VOP1Inst <"v_rsq_f16", VOP_F16_F16, AMDGPUrsq>;
defm V_LOG_F16 : VOP1Inst <"v_log_f16", VOP_F16_F16, flog2>;
defm V_EXP_F16 : VOP1Inst <"v_exp_f16", VOP_F16_F16, fexp2>;
defm V_FREXP_MANT_F16 : VOP1Inst <"v_frexp_mant_f16", VOP_F16_F16, int_amdgcn_frexp_mant>;
defm V_FREXP_EXP_I16_F16 : VOP1Inst <"v_frexp_exp_i16_f16", VOP_I16_F16, int_amdgcn_frexp_exp>;
defm V_FLOOR_F16 : VOP1Inst <"v_floor_f16", VOP_F16_F16, ffloor>;
defm V_CEIL_F16 : VOP1Inst <"v_ceil_f16", VOP_F16_F16, fceil>;
defm V_TRUNC_F16 : VOP1Inst <"v_trunc_f16", VOP_F16_F16, ftrunc>;
defm V_RNDNE_F16 : VOP1Inst <"v_rndne_f16", VOP_F16_F16, frint>;
defm V_FRACT_F16 : VOP1Inst <"v_fract_f16", VOP_F16_F16, AMDGPUfract>;
defm V_SIN_F16 : VOP1Inst <"v_sin_f16", VOP_F16_F16, AMDGPUsin>;
defm V_COS_F16 : VOP1Inst <"v_cos_f16", VOP_F16_F16, AMDGPUcos>;

}

let Predicates = [isVI] in {

def : Pat<
  (f32 (f16_to_fp i16:$src)),
  (V_CVT_F32_F16_e32 $src)
>;

def : Pat<
  (i16 (fp_to_f16 f32:$src)),
  (V_CVT_F16_F32_e32 $src)
>;

}

//===----------------------------------------------------------------------===//
// Target
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SI
//===----------------------------------------------------------------------===//

multiclass VOP1_Real_si <bits<9> op> {
  let AssemblerPredicates = [isSICI], DecoderNamespace = "SICI" in {
    def _e32_si :
      VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
      VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
    def _e64_si :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3e_si <{1, 1, op{6-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
}
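
// Expansion sketch (illustrative): "defm V_NOP : VOP1_Real_si <0x0>" below
// produces V_NOP_e32_si (VOP1 encoding, opcode 0x0) and V_NOP_e64_si (VOP3
// encoding, opcode {1, 1, op{6-0}}, i.e. 0x180 + op), both tied back to the
// V_NOP_e32/_e64 pseudos via SIMCInstr for MC lowering and disassembly.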

defm V_NOP : VOP1_Real_si <0x0>;
defm V_MOV_B32 : VOP1_Real_si <0x1>;
defm V_CVT_I32_F64 : VOP1_Real_si <0x3>;
defm V_CVT_F64_I32 : VOP1_Real_si <0x4>;
defm V_CVT_F32_I32 : VOP1_Real_si <0x5>;
defm V_CVT_F32_U32 : VOP1_Real_si <0x6>;
defm V_CVT_U32_F32 : VOP1_Real_si <0x7>;
defm V_CVT_I32_F32 : VOP1_Real_si <0x8>;
defm V_MOV_FED_B32 : VOP1_Real_si <0x9>;
defm V_CVT_F16_F32 : VOP1_Real_si <0xa>;
defm V_CVT_F32_F16 : VOP1_Real_si <0xb>;
defm V_CVT_RPI_I32_F32 : VOP1_Real_si <0xc>;
defm V_CVT_FLR_I32_F32 : VOP1_Real_si <0xd>;
defm V_CVT_OFF_F32_I4 : VOP1_Real_si <0xe>;
defm V_CVT_F32_F64 : VOP1_Real_si <0xf>;
defm V_CVT_F64_F32 : VOP1_Real_si <0x10>;
defm V_CVT_F32_UBYTE0 : VOP1_Real_si <0x11>;
defm V_CVT_F32_UBYTE1 : VOP1_Real_si <0x12>;
defm V_CVT_F32_UBYTE2 : VOP1_Real_si <0x13>;
defm V_CVT_F32_UBYTE3 : VOP1_Real_si <0x14>;
defm V_CVT_U32_F64 : VOP1_Real_si <0x15>;
defm V_CVT_F64_U32 : VOP1_Real_si <0x16>;
defm V_FRACT_F32 : VOP1_Real_si <0x20>;
defm V_TRUNC_F32 : VOP1_Real_si <0x21>;
defm V_CEIL_F32 : VOP1_Real_si <0x22>;
defm V_RNDNE_F32 : VOP1_Real_si <0x23>;
defm V_FLOOR_F32 : VOP1_Real_si <0x24>;
defm V_EXP_F32 : VOP1_Real_si <0x25>;
defm V_LOG_CLAMP_F32 : VOP1_Real_si <0x26>;
defm V_LOG_F32 : VOP1_Real_si <0x27>;
defm V_RCP_CLAMP_F32 : VOP1_Real_si <0x28>;
defm V_RCP_LEGACY_F32 : VOP1_Real_si <0x29>;
defm V_RCP_F32 : VOP1_Real_si <0x2a>;
defm V_RCP_IFLAG_F32 : VOP1_Real_si <0x2b>;
defm V_RSQ_CLAMP_F32 : VOP1_Real_si <0x2c>;
defm V_RSQ_LEGACY_F32 : VOP1_Real_si <0x2d>;
defm V_RSQ_F32 : VOP1_Real_si <0x2e>;
defm V_RCP_F64 : VOP1_Real_si <0x2f>;
defm V_RCP_CLAMP_F64 : VOP1_Real_si <0x30>;
defm V_RSQ_F64 : VOP1_Real_si <0x31>;
defm V_RSQ_CLAMP_F64 : VOP1_Real_si <0x32>;
defm V_SQRT_F32 : VOP1_Real_si <0x33>;
defm V_SQRT_F64 : VOP1_Real_si <0x34>;
defm V_SIN_F32 : VOP1_Real_si <0x35>;
defm V_COS_F32 : VOP1_Real_si <0x36>;
defm V_NOT_B32 : VOP1_Real_si <0x37>;
defm V_BFREV_B32 : VOP1_Real_si <0x38>;
defm V_FFBH_U32 : VOP1_Real_si <0x39>;
defm V_FFBL_B32 : VOP1_Real_si <0x3a>;
defm V_FFBH_I32 : VOP1_Real_si <0x3b>;
defm V_FREXP_EXP_I32_F64 : VOP1_Real_si <0x3c>;
defm V_FREXP_MANT_F64 : VOP1_Real_si <0x3d>;
defm V_FRACT_F64 : VOP1_Real_si <0x3e>;
defm V_FREXP_EXP_I32_F32 : VOP1_Real_si <0x3f>;
defm V_FREXP_MANT_F32 : VOP1_Real_si <0x40>;
defm V_CLREXCP : VOP1_Real_si <0x41>;
defm V_MOVRELD_B32 : VOP1_Real_si <0x42>;
defm V_MOVRELS_B32 : VOP1_Real_si <0x43>;
defm V_MOVRELSD_B32 : VOP1_Real_si <0x44>;

//===----------------------------------------------------------------------===//
// CI
//===----------------------------------------------------------------------===//

multiclass VOP1_Real_ci <bits<9> op> {
  let AssemblerPredicates = [isCIOnly], DecoderNamespace = "CI" in {
    def _e32_ci :
      VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
      VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
    def _e64_ci :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3e_si <{1, 1, op{6-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
}

defm V_TRUNC_F64 : VOP1_Real_ci <0x17>;
defm V_CEIL_F64 : VOP1_Real_ci <0x18>;
defm V_FLOOR_F64 : VOP1_Real_ci <0x1A>;
defm V_RNDNE_F64 : VOP1_Real_ci <0x19>;
defm V_LOG_LEGACY_F32 : VOP1_Real_ci <0x45>;
defm V_EXP_LEGACY_F32 : VOP1_Real_ci <0x46>;

//===----------------------------------------------------------------------===//
// VI
//===----------------------------------------------------------------------===//

class VOP1_SDWA <bits<8> op, VOP1_Pseudo ps, VOPProfile P = ps.Pfl> :
  VOP_SDWA <ps.OpName, P> {
  let Defs = ps.Defs;
  let Uses = ps.Uses;
  let SchedRW = ps.SchedRW;
  let hasSideEffects = ps.hasSideEffects;
  let Constraints = ps.Constraints;
  let DisableEncoding = ps.DisableEncoding;
  let AsmMatchConverter = "cvtSdwaVOP1";

  bits<8> vdst;
  let Inst{8-0}   = 0xf9; // sdwa
  let Inst{16-9}  = op;
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{31-25} = 0x3f; // encoding
}

class VOP1_DPP <bits<8> op, VOP1_Pseudo ps, VOPProfile P = ps.Pfl> :
  VOP_DPP <ps.OpName, P> {
  let Defs = ps.Defs;
  let Uses = ps.Uses;
  let SchedRW = ps.SchedRW;
  let hasSideEffects = ps.hasSideEffects;
  let Constraints = ps.Constraints;
  let DisableEncoding = ps.DisableEncoding;

  bits<8> vdst;
  let Inst{8-0}   = 0xfa; // dpp
  let Inst{16-9}  = op;
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{31-25} = 0x3f; //encoding
}
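
// Note (per the VI encoding convention reflected above): SDWA and DPP reuse
// the VOP1 word with the magic SRC0 values 0xf9 and 0xfa; the real source
// operand and the SDWA/DPP controls are carried in an extra 32-bit dword that
// follows the instruction word.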

multiclass VOP1_Real_vi <bits<10> op> {
  let AssemblerPredicates = [isVI], DecoderNamespace = "VI" in {
    def _e32_vi :
      VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.VI>,
      VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
    def _e64_vi :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
      VOP3e_vi <!add(0x140, op), !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }

  // For now, SDWA/DPP variants are defined only for the assembler and
  // disassembler.
  // TODO: add corresponding pseudo instructions.
  def _sdwa : VOP1_SDWA<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32")>;
  def _dpp : VOP1_DPP<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32")>;
}
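
// Opcode mapping sketch (following the !add(0x140, op) above): on VI the VOP3
// form of a VOP1 opcode lives at 0x140 + op, e.g. V_MOV_B32 (VOP1 opcode 0x1)
// becomes VOP3 opcode 0x141 in its _e64_vi encoding.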

defm V_NOP : VOP1_Real_vi <0x0>;
defm V_MOV_B32 : VOP1_Real_vi <0x1>;
defm V_CVT_I32_F64 : VOP1_Real_vi <0x3>;
defm V_CVT_F64_I32 : VOP1_Real_vi <0x4>;
defm V_CVT_F32_I32 : VOP1_Real_vi <0x5>;
defm V_CVT_F32_U32 : VOP1_Real_vi <0x6>;
defm V_CVT_U32_F32 : VOP1_Real_vi <0x7>;
defm V_CVT_I32_F32 : VOP1_Real_vi <0x8>;
defm V_CVT_F16_F32 : VOP1_Real_vi <0xa>;
defm V_CVT_F32_F16 : VOP1_Real_vi <0xb>;
defm V_CVT_RPI_I32_F32 : VOP1_Real_vi <0xc>;
defm V_CVT_FLR_I32_F32 : VOP1_Real_vi <0xd>;
defm V_CVT_OFF_F32_I4 : VOP1_Real_vi <0xe>;
defm V_CVT_F32_F64 : VOP1_Real_vi <0xf>;
defm V_CVT_F64_F32 : VOP1_Real_vi <0x10>;
defm V_CVT_F32_UBYTE0 : VOP1_Real_vi <0x11>;
defm V_CVT_F32_UBYTE1 : VOP1_Real_vi <0x12>;
defm V_CVT_F32_UBYTE2 : VOP1_Real_vi <0x13>;
defm V_CVT_F32_UBYTE3 : VOP1_Real_vi <0x14>;
defm V_CVT_U32_F64 : VOP1_Real_vi <0x15>;
defm V_CVT_F64_U32 : VOP1_Real_vi <0x16>;
defm V_FRACT_F32 : VOP1_Real_vi <0x1b>;
defm V_TRUNC_F32 : VOP1_Real_vi <0x1c>;
defm V_CEIL_F32 : VOP1_Real_vi <0x1d>;
defm V_RNDNE_F32 : VOP1_Real_vi <0x1e>;
defm V_FLOOR_F32 : VOP1_Real_vi <0x1f>;
defm V_EXP_F32 : VOP1_Real_vi <0x20>;
defm V_LOG_F32 : VOP1_Real_vi <0x21>;
defm V_RCP_F32 : VOP1_Real_vi <0x22>;
defm V_RCP_IFLAG_F32 : VOP1_Real_vi <0x23>;
defm V_RSQ_F32 : VOP1_Real_vi <0x24>;
defm V_RCP_F64 : VOP1_Real_vi <0x25>;
defm V_RSQ_F64 : VOP1_Real_vi <0x26>;
defm V_SQRT_F32 : VOP1_Real_vi <0x27>;
defm V_SQRT_F64 : VOP1_Real_vi <0x28>;
defm V_SIN_F32 : VOP1_Real_vi <0x29>;
defm V_COS_F32 : VOP1_Real_vi <0x2a>;
defm V_NOT_B32 : VOP1_Real_vi <0x2b>;
defm V_BFREV_B32 : VOP1_Real_vi <0x2c>;
defm V_FFBH_U32 : VOP1_Real_vi <0x2d>;
defm V_FFBL_B32 : VOP1_Real_vi <0x2e>;
defm V_FFBH_I32 : VOP1_Real_vi <0x2f>;
defm V_FREXP_EXP_I32_F64 : VOP1_Real_vi <0x30>;
defm V_FREXP_MANT_F64 : VOP1_Real_vi <0x31>;
defm V_FRACT_F64 : VOP1_Real_vi <0x32>;
defm V_FREXP_EXP_I32_F32 : VOP1_Real_vi <0x33>;
defm V_FREXP_MANT_F32 : VOP1_Real_vi <0x34>;
defm V_CLREXCP : VOP1_Real_vi <0x35>;
defm V_MOVRELD_B32 : VOP1_Real_vi <0x36>;
defm V_MOVRELS_B32 : VOP1_Real_vi <0x37>;
defm V_MOVRELSD_B32 : VOP1_Real_vi <0x38>;
defm V_TRUNC_F64 : VOP1_Real_vi <0x17>;
defm V_CEIL_F64 : VOP1_Real_vi <0x18>;
defm V_FLOOR_F64 : VOP1_Real_vi <0x1A>;
defm V_RNDNE_F64 : VOP1_Real_vi <0x19>;
defm V_LOG_LEGACY_F32 : VOP1_Real_vi <0x4c>;
defm V_EXP_LEGACY_F32 : VOP1_Real_vi <0x4b>;
defm V_CVT_F16_U16 : VOP1_Real_vi <0x39>;
defm V_CVT_F16_I16 : VOP1_Real_vi <0x3a>;
defm V_CVT_U16_F16 : VOP1_Real_vi <0x3b>;
defm V_CVT_I16_F16 : VOP1_Real_vi <0x3c>;
defm V_RCP_F16 : VOP1_Real_vi <0x3d>;
defm V_SQRT_F16 : VOP1_Real_vi <0x3e>;
defm V_RSQ_F16 : VOP1_Real_vi <0x3f>;
defm V_LOG_F16 : VOP1_Real_vi <0x40>;
defm V_EXP_F16 : VOP1_Real_vi <0x41>;
defm V_FREXP_MANT_F16 : VOP1_Real_vi <0x42>;
defm V_FREXP_EXP_I16_F16 : VOP1_Real_vi <0x43>;
defm V_FLOOR_F16 : VOP1_Real_vi <0x44>;
defm V_CEIL_F16 : VOP1_Real_vi <0x45>;
defm V_TRUNC_F16 : VOP1_Real_vi <0x46>;
defm V_RNDNE_F16 : VOP1_Real_vi <0x47>;
defm V_FRACT_F16 : VOP1_Real_vi <0x48>;
defm V_SIN_F16 : VOP1_Real_vi <0x49>;
defm V_COS_F16 : VOP1_Real_vi <0x4a>;


// Copy of v_mov_b32 with $vdst as a use operand for use with VGPR
// indexing mode. vdst can't be treated as a def for codegen purposes,
// and an implicit use and def of the super register should be added.
def V_MOV_B32_indirect : VPseudoInstSI<(outs),
  (ins getVALUDstForVT<i32>.ret:$vdst, getVOPSrc0ForVT<i32>.ret:$src0)>,
  PseudoInstExpansion<(V_MOV_B32_e32_vi getVALUDstForVT<i32>.ret:$vdst,
                       getVOPSrc0ForVT<i32>.ret:$src0)> {
  let VOP1 = 1;
  let SubtargetPredicate = isVI;
}

// This is a pseudo variant of the v_movreld_b32 instruction in which the
// vector operand appears only twice, once as def and once as use. Using this
// pseudo avoids problems with the Two Address instructions pass.
class V_MOVRELD_B32_pseudo<RegisterClass rc> : VPseudoInstSI <
  (outs rc:$vdst),
  (ins rc:$vsrc, VSrc_b32:$val, i32imm:$offset)> {
  let VOP1 = 1;

  let Constraints = "$vsrc = $vdst";
  let Uses = [M0, EXEC];

  let SubtargetPredicate = HasMovrel;
}

def V_MOVRELD_B32_V1 : V_MOVRELD_B32_pseudo<VGPR_32>;
def V_MOVRELD_B32_V2 : V_MOVRELD_B32_pseudo<VReg_64>;
def V_MOVRELD_B32_V4 : V_MOVRELD_B32_pseudo<VReg_128>;
def V_MOVRELD_B32_V8 : V_MOVRELD_B32_pseudo<VReg_256>;
def V_MOVRELD_B32_V16 : V_MOVRELD_B32_pseudo<VReg_512>;
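
// Illustrative lowering sketch (not taken from this file): an indirect write
// into, say, a 4-dword vector held in a VReg_128 register is expected to be
// expanded around V_MOVRELD_B32_V4 with M0 holding the dynamic index; the
// "$vsrc = $vdst" constraint ties the whole super-register so the untouched
// elements of the vector stay live across the partial write.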

let Predicates = [isVI] in {

def : Pat <
  (i32 (int_amdgcn_mov_dpp i32:$src, imm:$dpp_ctrl, imm:$row_mask, imm:$bank_mask,
                           imm:$bound_ctrl)),
  (V_MOV_B32_dpp $src, (as_i32imm $dpp_ctrl), (as_i32imm $row_mask),
                       (as_i32imm $bank_mask), (as_i1imm $bound_ctrl))
>;


def : Pat<
  (i32 (anyext i16:$src)),
  (COPY $src)
>;

def : Pat<
  (i64 (anyext i16:$src)),
  (REG_SEQUENCE VReg_64,
    (i32 (COPY $src)), sub0,
    (V_MOV_B32_e32 (i32 0)), sub1)
>;

def : Pat<
  (i16 (trunc i32:$src)),
  (COPY $src)
>;

def : Pat<
  (i1 (trunc i16:$src)),
  (COPY $src)
>;


def : Pat <
  (i16 (trunc i64:$src)),
  (EXTRACT_SUBREG $src, sub0)
>;

} // End Predicates = [isVI]