//===-- VOP2Instructions.td - Vector Instruction Definitions --------------===//
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | |
| 10 | //===----------------------------------------------------------------------===// |
| 11 | // VOP2 Classes |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
// Raw 32-bit VOP2 encoding: Inst{31} = 0 selects the VOP2 format,
// Inst{30-25} is the opcode, then vdst/src1/src0 fields.
// Operand fields the profile does not use are encoded as zero.
class VOP2e <bits<6> op, VOPProfile P> : Enc32 {
  bits<8> vdst;
  bits<9> src0; // 9 bits — one wider than src1/vdst (8-bit VGPR fields).
  bits<8> src1;

  let Inst{8-0}   = !if(P.HasSrc0, src0, 0);
  let Inst{16-9}  = !if(P.HasSrc1, src1, 0);
  let Inst{24-17} = !if(P.EmitDst, vdst, 0);
  let Inst{30-25} = op;
  let Inst{31}    = 0x0; // encoding
}
| 25 | |
// 64-bit encoding used by the MADMK/MADAK forms: the low dword has the
// same layout as VOP2e above, and the high dword carries a 32-bit
// literal constant.
class VOP2_MADKe <bits<6> op, VOPProfile P> : Enc64 {
  bits<8> vdst;
  bits<9> src0;
  bits<8> src1;
  bits<32> imm;

  let Inst{8-0}   = !if(P.HasSrc0, src0, 0);
  let Inst{16-9}  = !if(P.HasSrc1, src1, 0);
  let Inst{24-17} = !if(P.EmitDst, vdst, 0);
  let Inst{30-25} = op;
  let Inst{31}    = 0x0; // encoding
  let Inst{63-32} = imm; // trailing 32-bit literal
}
| 39 | |
// Code-generation pseudo for a VOP2 instruction. It carries the VOP
// profile in Pfl so the per-subtarget "real" definitions can derive their
// encodings from it. The MnemonicAlias maps the suffixed name (e.g.
// "v_add_f32_e32") back to the plain mnemonic for the assembler.
class VOP2_Pseudo <string opName, VOPProfile P, list<dag> pattern=[], string suffix = "_e32"> :
  InstSI <P.Outs32, P.Ins32, "", pattern>,
  VOP <opName>,
  SIMCInstr <opName#suffix, SIEncodingFamily.NONE>,
  MnemonicAlias<opName#suffix, opName> {

  let isPseudo = 1;
  let isCodeGenOnly = 1;
  let UseNamedOperandTable = 1;

  string Mnemonic = opName;
  string AsmOperands = P.Asm32;

  let Size = 4;
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let SubtargetPredicate = isGCN;

  let VOP2 = 1;
  let VALU = 1;
  let Uses = [EXEC]; // VALU ops read the exec mask (overridden below for readlane/writelane)

  let AsmVariantName = AMDGPUAsmVariants.Default;

  // Keep the profile around so real-instruction classes can !cast to it.
  VOPProfile Pfl = P;
}
| 67 | |
// Subtarget-specific "real" VOP2 instruction: pairs a pseudo with an
// encoding family and copies the relevant flags from the pseudo so the
// two definitions stay in sync.
class VOP2_Real <VOP2_Pseudo ps, int EncodingFamily> :
  InstSI <ps.OutOperandList, ps.InOperandList, ps.Mnemonic # ps.AsmOperands, []>,
  SIMCInstr <ps.PseudoInstr, EncodingFamily> {

  let isPseudo = 0;
  let isCodeGenOnly = 0;

  // copy relevant pseudo op flags
  let SubtargetPredicate = ps.SubtargetPredicate;
  let AsmMatchConverter  = ps.AsmMatchConverter;
  let AsmVariantName     = ps.AsmVariantName;
  let Constraints        = ps.Constraints;
  let DisableEncoding    = ps.DisableEncoding;
  let TSFlags            = ps.TSFlags;
}
| 83 | |
// Selection pattern for the 64-bit (VOP3) form of a VOP2 op. If the
// profile supports source modifiers, the pattern matches them (plus clamp
// and omod, grouped with src0 by VOP3Mods0); otherwise it is a plain
// two-operand match.
class getVOP2Pat64 <SDPatternOperator node, VOPProfile P> : LetDummies {
  list<dag> ret = !if(P.HasModifiers,
    [(set P.DstVT:$vdst,
      (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod)),
            (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
    [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]);
}
| 91 | |
// Standard VOP2 op: a pattern-less _e32 pseudo plus an _e64 (VOP3) pseudo
// that carries the selection pattern. revOp names the op this one is the
// operand-swapped variant of (defaults to itself, i.e. not a REV form).
multiclass VOP2Inst <string opName,
                     VOPProfile P,
                     SDPatternOperator node = null_frag,
                     string revOp = opName> {

  def _e32 : VOP2_Pseudo <opName, P>,
             Commutable_REV<revOp#"_e32", !eq(revOp, opName)>;

  def _e64 : VOP3_Pseudo <opName, P, getVOP2Pat64<node, P>.ret>,
             Commutable_REV<revOp#"_e64", !eq(revOp, opName)>;
}
| 103 | |
// VOP2 op whose 32-bit form implicitly defines VCC (carry out) and, when
// the profile has a third source argument, also implicitly reads VCC
// (carry in). WriteSALU accounts for the SGPR (VCC) write in scheduling.
multiclass VOP2bInst <string opName,
                      VOPProfile P,
                      SDPatternOperator node = null_frag,
                      string revOp = opName,
                      bit useSGPRInput = !eq(P.NumSrcArgs, 3)> {

  let SchedRW = [Write32Bit, WriteSALU] in {
    let Uses = !if(useSGPRInput, [VCC, EXEC], [EXEC]), Defs = [VCC] in {
      def _e32 : VOP2_Pseudo <opName, P>,
                 Commutable_REV<revOp#"_e32", !eq(revOp, opName)>;
    }
    def _e64 : VOP3_Pseudo <opName, P, getVOP2Pat64<node, P>.ret>,
               Commutable_REV<revOp#"_e64", !eq(revOp, opName)>;
  }
}
| 119 | |
// Like VOP2bInst, but the 32-bit form only implicitly *reads* VCC when
// the profile has a third source — it does not define it (no Defs here).
multiclass VOP2eInst <string opName,
                      VOPProfile P,
                      SDPatternOperator node = null_frag,
                      string revOp = opName,
                      bit useSGPRInput = !eq(P.NumSrcArgs, 3)> {

  let SchedRW = [Write32Bit] in {
    let Uses = !if(useSGPRInput, [VCC, EXEC], [EXEC]) in {
      def _e32 : VOP2_Pseudo <opName, P>,
                 Commutable_REV<revOp#"_e32", !eq(revOp, opName)>;
    }
    def _e64 : VOP3_Pseudo <opName, P, getVOP2Pat64<node, P>.ret>,
               Commutable_REV<revOp#"_e64", !eq(revOp, opName)>;
  }
}
| 135 | |
// Profile for v_madak_{f16,f32}: two vector sources followed by a 32-bit
// literal constant ($imm is the last asm operand).
class VOP_MADAK <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> {
  // Choose the literal operand type from the element size (f32 vs f16).
  field Operand ImmOpType = !if(!eq(vt.Size, 32), f32kimm, f16kimm);
  field dag Ins32 = (ins VCSrc_f32:$src0, VGPR_32:$src1, ImmOpType:$imm);
  field string Asm32 = "$vdst, $src0, $src1, $imm";
  field bit HasExt = 0; // no SDWA/DPP variants
}

def VOP_MADAK_F16 : VOP_MADAK <f16>;
def VOP_MADAK_F32 : VOP_MADAK <f32>;
| 145 | |
// Profile for v_madmk_{f16,f32}: like VOP_MADAK, but the 32-bit literal
// is the *middle* operand ($imm comes between src0 and src1).
class VOP_MADMK <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> {
  // Choose the literal operand type from the element size (f32 vs f16).
  field Operand ImmOpType = !if(!eq(vt.Size, 32), f32kimm, f16kimm);
  field dag Ins32 = (ins VCSrc_f32:$src0, ImmOpType:$imm, VGPR_32:$src1);
  field string Asm32 = "$vdst, $src0, $imm, $src1";
  field bit HasExt = 0; // no SDWA/DPP variants
}

def VOP_MADMK_F16 : VOP_MADMK <f16>;
def VOP_MADMK_F32 : VOP_MADMK <f32>;
| 155 | |
| 156 | class VOP_MAC <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> { |
Valery Pykhtin | 355103f | 2016-09-23 09:08:07 +0000 | [diff] [blame] | 157 | let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1, VGPR_32:$src2); |
| 158 | let Ins64 = getIns64<Src0RC64, Src1RC64, RegisterOperand<VGPR_32>, 3, |
| 159 | HasModifiers, Src0Mod, Src1Mod, Src2Mod>.ret; |
| 160 | let InsDPP = (ins FP32InputMods:$src0_modifiers, Src0RC32:$src0, |
| 161 | FP32InputMods:$src1_modifiers, Src1RC32:$src1, |
| 162 | VGPR_32:$src2, // stub argument |
| 163 | dpp_ctrl:$dpp_ctrl, row_mask:$row_mask, |
| 164 | bank_mask:$bank_mask, bound_ctrl:$bound_ctrl); |
| 165 | let InsSDWA = (ins FP32InputMods:$src0_modifiers, Src0RC32:$src0, |
| 166 | FP32InputMods:$src1_modifiers, Src1RC32:$src1, |
| 167 | VGPR_32:$src2, // stub argument |
| 168 | clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused, |
| 169 | src0_sel:$src0_sel, src1_sel:$src1_sel); |
Konstantin Zhuravlyov | f86e4b7 | 2016-11-13 07:01:11 +0000 | [diff] [blame] | 170 | let Asm32 = getAsm32<1, 2, vt>.ret; |
| 171 | let AsmDPP = getAsmDPP<1, 2, HasModifiers, vt>.ret; |
| 172 | let AsmSDWA = getAsmSDWA<1, 2, HasModifiers, vt>.ret; |
Valery Pykhtin | 355103f | 2016-09-23 09:08:07 +0000 | [diff] [blame] | 173 | let HasSrc2 = 0; |
| 174 | let HasSrc2Mods = 0; |
Sam Kolton | a3ec5c1 | 2016-10-07 14:46:06 +0000 | [diff] [blame] | 175 | let HasExt = 1; |
Valery Pykhtin | 355103f | 2016-09-23 09:08:07 +0000 | [diff] [blame] | 176 | } |
| 177 | |
// Half-precision MAC profile (Asm64 set here, not in VOP_MAC — see FIXME).
def VOP_MAC_F16 : VOP_MAC <f16> {
  // FIXME: Move 'Asm64' definition to VOP_MAC, and use 'vt'. Currently it gives
  // 'not a string initializer' error.
  let Asm64 = getAsm64<1, 2, HasModifiers, f16>.ret;
}

// Single-precision MAC profile (Asm64 set here, not in VOP_MAC — see FIXME).
def VOP_MAC_F32 : VOP_MAC <f32> {
  // FIXME: Move 'Asm64' definition to VOP_MAC, and use 'vt'. Currently it gives
  // 'not a string initializer' error.
  let Asm64 = getAsm64<1, 2, HasModifiers, f32>.ret;
}
| 189 | |
// Write out to vcc or arbitrary SGPR.
// The 32-bit encoding always writes VCC (spelled literally in Asm32);
// only the 64-bit form has an explicit $sdst output.
def VOP2b_I32_I1_I32_I32 : VOPProfile<[i32, i32, i32, untyped]> {
  let Asm32 = "$vdst, vcc, $src0, $src1";
  let Asm64 = "$vdst, $sdst, $src0, $src1";
  let Outs32 = (outs DstRC:$vdst);
  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
}
| 197 | |
// Write out to vcc or arbitrary SGPR and read in from vcc or
// arbitrary SGPR.
def VOP2b_I32_I1_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1]> {
  // We use VCSrc_b32 to exclude literal constants, even though the
  // encoding normally allows them since the implicit VCC use means
  // using one would always violate the constant bus
  // restriction. SGPRs are still allowed because it should
  // technically be possible to use VCC again as src0.
  let Src0RC32 = VCSrc_b32;
  let Asm32 = "$vdst, vcc, $src0, $src1, vcc";
  let Asm64 = "$vdst, $sdst, $src0, $src1, $src2";
  let Outs32 = (outs DstRC:$vdst);
  let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);

  // Suppress src2 implied by type since the 32-bit encoding uses an
  // implicit VCC use.
  let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1);
}
| 216 | |
// Read in from vcc or arbitrary SGPR. Unlike the VOP2b profiles above,
// there is no SGPR carry-out: both encodings write only $vdst.
def VOP2e_I32_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1]> {
  let Src0RC32 = VCSrc_b32; // See comment in def VOP2b_I32_I1_I32_I32_I1 above.
  let Asm32 = "$vdst, $src0, $src1, vcc";
  let Asm64 = "$vdst, $src0, $src1, $src2";
  let Outs32 = (outs DstRC:$vdst);
  let Outs64 = (outs DstRC:$vdst);

  // Suppress src2 implied by type since the 32-bit encoding uses an
  // implicit VCC use.
  let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1);
}
| 229 | |
// v_readlane profile: VGPR source, SGPR result; identical operands and
// asm for the 32- and 64-bit forms.
def VOP_READLANE : VOPProfile<[i32, i32, i32]> {
  let Outs32 = (outs SReg_32:$vdst);
  let Outs64 = Outs32;
  let Ins32 = (ins VGPR_32:$src0, SCSrc_b32:$src1);
  let Ins64 = Ins32;
  let Asm32 = " $vdst, $src0, $src1";
  let Asm64 = Asm32;
}
| 238 | |
// v_writelane profile: the mirror of VOP_READLANE — SGPR source, VGPR
// result; identical operands and asm for the 32- and 64-bit forms.
def VOP_WRITELANE : VOPProfile<[i32, i32, i32]> {
  let Outs32 = (outs VGPR_32:$vdst);
  let Outs64 = Outs32;
  let Ins32 = (ins SReg_32:$src0, SCSrc_b32:$src1);
  let Ins64 = Ins32;
  let Asm32 = " $vdst, $src0, $src1";
  let Asm64 = Asm32;
}
| 247 | |
| 248 | //===----------------------------------------------------------------------===// |
| 249 | // VOP2 Instructions |
| 250 | //===----------------------------------------------------------------------===// |
| 251 | |
// Instructions available on all GCN subtargets.
let SubtargetPredicate = isGCN in {

defm V_CNDMASK_B32 : VOP2eInst <"v_cndmask_b32", VOP2e_I32_I32_I32_I1>;
def V_MADMK_F32 : VOP2_Pseudo <"v_madmk_f32", VOP_MADMK_F32>;

let isCommutable = 1 in {
defm V_ADD_F32 : VOP2Inst <"v_add_f32", VOP_F32_F32_F32, fadd>;
defm V_SUB_F32 : VOP2Inst <"v_sub_f32", VOP_F32_F32_F32, fsub>;
defm V_SUBREV_F32 : VOP2Inst <"v_subrev_f32", VOP_F32_F32_F32, null_frag, "v_sub_f32">;
defm V_MUL_LEGACY_F32 : VOP2Inst <"v_mul_legacy_f32", VOP_F32_F32_F32, AMDGPUfmul_legacy>;
defm V_MUL_F32 : VOP2Inst <"v_mul_f32", VOP_F32_F32_F32, fmul>;
defm V_MUL_I32_I24 : VOP2Inst <"v_mul_i32_i24", VOP_I32_I32_I32, AMDGPUmul_i24>;
defm V_MUL_HI_I32_I24 : VOP2Inst <"v_mul_hi_i32_i24", VOP_I32_I32_I32, AMDGPUmulhi_i24>;
defm V_MUL_U32_U24 : VOP2Inst <"v_mul_u32_u24", VOP_I32_I32_I32, AMDGPUmul_u24>;
defm V_MUL_HI_U32_U24 : VOP2Inst <"v_mul_hi_u32_u24", VOP_I32_I32_I32, AMDGPUmulhi_u24>;
defm V_MIN_F32 : VOP2Inst <"v_min_f32", VOP_F32_F32_F32, fminnum>;
defm V_MAX_F32 : VOP2Inst <"v_max_f32", VOP_F32_F32_F32, fmaxnum>;
defm V_MIN_I32 : VOP2Inst <"v_min_i32", VOP_I32_I32_I32>;
defm V_MAX_I32 : VOP2Inst <"v_max_i32", VOP_I32_I32_I32>;
defm V_MIN_U32 : VOP2Inst <"v_min_u32", VOP_I32_I32_I32>;
defm V_MAX_U32 : VOP2Inst <"v_max_u32", VOP_I32_I32_I32>;
defm V_LSHRREV_B32 : VOP2Inst <"v_lshrrev_b32", VOP_I32_I32_I32, null_frag, "v_lshr_b32">;
defm V_ASHRREV_I32 : VOP2Inst <"v_ashrrev_i32", VOP_I32_I32_I32, null_frag, "v_ashr_i32">;
defm V_LSHLREV_B32 : VOP2Inst <"v_lshlrev_b32", VOP_I32_I32_I32, null_frag, "v_lshl_b32">;
defm V_AND_B32 : VOP2Inst <"v_and_b32", VOP_I32_I32_I32>;
defm V_OR_B32 : VOP2Inst <"v_or_b32", VOP_I32_I32_I32>;
defm V_XOR_B32 : VOP2Inst <"v_xor_b32", VOP_I32_I32_I32>;

// $src2 is the accumulator: tied to the result and never encoded.
let Constraints = "$vdst = $src2", DisableEncoding="$src2",
    isConvertibleToThreeAddress = 1 in {
defm V_MAC_F32 : VOP2Inst <"v_mac_f32", VOP_MAC_F32>;
}

def V_MADAK_F32 : VOP2_Pseudo <"v_madak_f32", VOP_MADAK_F32>;

// No patterns so that the scalar instructions are always selected.
// The scalar versions will be replaced with vector when needed later.

// V_ADD_I32, V_SUB_I32, and V_SUBREV_I32 were renamed to *_U32 in VI,
// but the VI instructions behave the same as the SI versions.
defm V_ADD_I32 : VOP2bInst <"v_add_i32", VOP2b_I32_I1_I32_I32>;
defm V_SUB_I32 : VOP2bInst <"v_sub_i32", VOP2b_I32_I1_I32_I32>;
defm V_SUBREV_I32 : VOP2bInst <"v_subrev_i32", VOP2b_I32_I1_I32_I32, null_frag, "v_sub_i32">;
defm V_ADDC_U32 : VOP2bInst <"v_addc_u32", VOP2b_I32_I1_I32_I32_I1>;
defm V_SUBB_U32 : VOP2bInst <"v_subb_u32", VOP2b_I32_I1_I32_I32_I1>;
defm V_SUBBREV_U32 : VOP2bInst <"v_subbrev_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_subb_u32">;
} // End isCommutable = 1

// These are special and do not read the exec mask.
let isConvergent = 1, Uses = []<Register> in {
def V_READLANE_B32 : VOP2_Pseudo<"v_readlane_b32", VOP_READLANE,
  [(set i32:$vdst, (int_amdgcn_readlane i32:$src0, i32:$src1))], "">;

def V_WRITELANE_B32 : VOP2_Pseudo<"v_writelane_b32", VOP_WRITELANE, [], "">;
} // End isConvergent = 1, Uses = []

defm V_BFM_B32 : VOP2Inst <"v_bfm_b32", VOP_I32_I32_I32>;
defm V_BCNT_U32_B32 : VOP2Inst <"v_bcnt_u32_b32", VOP_I32_I32_I32>;
defm V_MBCNT_LO_U32_B32 : VOP2Inst <"v_mbcnt_lo_u32_b32", VOP_I32_I32_I32, int_amdgcn_mbcnt_lo>;
defm V_MBCNT_HI_U32_B32 : VOP2Inst <"v_mbcnt_hi_u32_b32", VOP_I32_I32_I32, int_amdgcn_mbcnt_hi>;
defm V_LDEXP_F32 : VOP2Inst <"v_ldexp_f32", VOP_F32_F32_I32, AMDGPUldexp>;
defm V_CVT_PKACCUM_U8_F32 : VOP2Inst <"v_cvt_pkaccum_u8_f32", VOP_I32_F32_I32>; // TODO: set "Uses = dst"
defm V_CVT_PKNORM_I16_F32 : VOP2Inst <"v_cvt_pknorm_i16_f32", VOP_I32_F32_F32>;
defm V_CVT_PKNORM_U16_F32 : VOP2Inst <"v_cvt_pknorm_u16_f32", VOP_I32_F32_F32>;
defm V_CVT_PKRTZ_F16_F32 : VOP2Inst <"v_cvt_pkrtz_f16_f32", VOP_I32_F32_F32, int_SI_packf16>;
defm V_CVT_PK_U16_U32 : VOP2Inst <"v_cvt_pk_u16_u32", VOP_I32_I32_I32>;
defm V_CVT_PK_I16_I32 : VOP2Inst <"v_cvt_pk_i16_i32", VOP_I32_I32_I32>;

} // End SubtargetPredicate = isGCN
| 321 | |
| 322 | |
// These instructions only exist on SI and CI
let SubtargetPredicate = isSICI in {

defm V_MIN_LEGACY_F32 : VOP2Inst <"v_min_legacy_f32", VOP_F32_F32_F32, AMDGPUfmin_legacy>;
defm V_MAX_LEGACY_F32 : VOP2Inst <"v_max_legacy_f32", VOP_F32_F32_F32, AMDGPUfmax_legacy>;

let isCommutable = 1 in {
defm V_MAC_LEGACY_F32 : VOP2Inst <"v_mac_legacy_f32", VOP_F32_F32_F32>;
// Non-REV shifts (shift amount second); VI only keeps the *REV forms.
defm V_LSHR_B32 : VOP2Inst <"v_lshr_b32", VOP_I32_I32_I32>;
defm V_ASHR_I32 : VOP2Inst <"v_ashr_i32", VOP_I32_I32_I32>;
defm V_LSHL_B32 : VOP2Inst <"v_lshl_b32", VOP_I32_I32_I32>;
} // End isCommutable = 1

} // End SubtargetPredicate = isSICI
| 337 | |
// Instructions that only exist on VI (mostly the 16-bit ops).
let SubtargetPredicate = isVI in {

def V_MADMK_F16 : VOP2_Pseudo <"v_madmk_f16", VOP_MADMK_F16>;
defm V_LSHLREV_B16 : VOP2Inst <"v_lshlrev_b16", VOP_I16_I16_I16>;
defm V_LSHRREV_B16 : VOP2Inst <"v_lshrrev_b16", VOP_I16_I16_I16>;
defm V_ASHRREV_B16 : VOP2Inst <"v_ashrrev_b16", VOP_I16_I16_I16>;
defm V_LDEXP_F16 : VOP2Inst <"v_ldexp_f16", VOP_F16_F16_I32, AMDGPUldexp>;

let isCommutable = 1 in {
defm V_ADD_F16 : VOP2Inst <"v_add_f16", VOP_F16_F16_F16, fadd>;
defm V_SUB_F16 : VOP2Inst <"v_sub_f16", VOP_F16_F16_F16, fsub>;
defm V_SUBREV_F16 : VOP2Inst <"v_subrev_f16", VOP_F16_F16_F16, null_frag, "v_sub_f16">;
defm V_MUL_F16 : VOP2Inst <"v_mul_f16", VOP_F16_F16_F16, fmul>;
def V_MADAK_F16 : VOP2_Pseudo <"v_madak_f16", VOP_MADAK_F16>;
defm V_ADD_U16 : VOP2Inst <"v_add_u16", VOP_I16_I16_I16>;
defm V_SUB_U16 : VOP2Inst <"v_sub_u16" , VOP_I16_I16_I16>;
defm V_SUBREV_U16 : VOP2Inst <"v_subrev_u16", VOP_I16_I16_I16, null_frag, "v_sub_u16">;
defm V_MUL_LO_U16 : VOP2Inst <"v_mul_lo_u16", VOP_I16_I16_I16>;
defm V_MAX_F16 : VOP2Inst <"v_max_f16", VOP_F16_F16_F16, fmaxnum>;
defm V_MIN_F16 : VOP2Inst <"v_min_f16", VOP_F16_F16_F16, fminnum>;
defm V_MAX_U16 : VOP2Inst <"v_max_u16", VOP_I16_I16_I16>;
defm V_MAX_I16 : VOP2Inst <"v_max_i16", VOP_I16_I16_I16>;
defm V_MIN_U16 : VOP2Inst <"v_min_u16", VOP_I16_I16_I16>;
defm V_MIN_I16 : VOP2Inst <"v_min_i16", VOP_I16_I16_I16>;

// As with v_mac_f32: $src2 is the accumulator, tied to $vdst.
let Constraints = "$vdst = $src2", DisableEncoding="$src2",
    isConvertibleToThreeAddress = 1 in {
defm V_MAC_F16 : VOP2Inst <"v_mac_f16", VOP_MAC_F16>;
}
} // End isCommutable = 1

} // End SubtargetPredicate = isVI
| 370 | |
// Note: 16-bit instructions produce a 0 result in the high 16-bits.
// Select 'op' at i16 directly, and also fold a zext of the result to
// i32 (high bits already zero per the note above) or i64 (zero-extend
// the upper dword explicitly with a v_mov of 0).
multiclass Arithmetic_i16_Pats <SDPatternOperator op, Instruction inst> {

  def : Pat<
    (op i16:$src0, i16:$src1),
    (inst $src0, $src1)
  >;

  def : Pat<
    (i32 (zext (op i16:$src0, i16:$src1))),
    (inst $src0, $src1)
  >;

  def : Pat<
    (i64 (zext (op i16:$src0, i16:$src1))),
    (REG_SEQUENCE VReg_64,
      (inst $src0, $src1), sub0,
      (V_MOV_B32_e32 (i32 0)), sub1)
  >;

}
| 392 | |
// Patterns for the *REV shift instructions: the instruction takes the
// shift amount as its first operand, so the DAG operands are swapped
// ($src1 before $src0). zext folds mirror Arithmetic_i16_Pats above.
multiclass Bits_OpsRev_i16_Pats <SDPatternOperator op, Instruction inst> {

  def : Pat<
    (op i16:$src0, i32:$src1),
    (inst $src1, $src0)
  >;

  def : Pat<
    (i32 (zext (op i16:$src0, i32:$src1))),
    (inst $src1, $src0)
  >;


  def : Pat<
    (i64 (zext (op i16:$src0, i32:$src1))),
    (REG_SEQUENCE VReg_64,
      (inst $src1, $src0), sub0,
      (V_MOV_B32_e32 (i32 0)), sub1)
  >;
}
| 413 | |
// Extend i1 -> i16 by selecting 0 or 1 with v_cndmask; used for both
// zext and anyext below.
class ZExt_i16_i1_Pat <SDNode ext> : Pat <
  (i16 (ext i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src)
>;
| 418 | |
// 16-bit selection patterns, valid on VI where the instructions exist.
let Predicates = [isVI] in {

defm : Arithmetic_i16_Pats<add, V_ADD_U16_e64>;
defm : Arithmetic_i16_Pats<mul, V_MUL_LO_U16_e64>;
defm : Arithmetic_i16_Pats<sub, V_SUB_U16_e64>;
defm : Arithmetic_i16_Pats<smin, V_MIN_I16_e64>;
defm : Arithmetic_i16_Pats<smax, V_MAX_I16_e64>;
defm : Arithmetic_i16_Pats<umin, V_MIN_U16_e64>;
defm : Arithmetic_i16_Pats<umax, V_MAX_U16_e64>;

// i16 bitwise ops are done with the full-width 32-bit instructions.
def : Pat <
  (and i16:$src0, i16:$src1),
  (V_AND_B32_e64 $src0, $src1)
>;

def : Pat <
  (or i16:$src0, i16:$src1),
  (V_OR_B32_e64 $src0, $src1)
>;

def : Pat <
  (xor i16:$src0, i16:$src1),
  (V_XOR_B32_e64 $src0, $src1)
>;

defm : Bits_OpsRev_i16_Pats<shl, V_LSHLREV_B16_e32>;
defm : Bits_OpsRev_i16_Pats<srl, V_LSHRREV_B16_e32>;
defm : Bits_OpsRev_i16_Pats<sra, V_ASHRREV_B16_e32>;

def : ZExt_i16_i1_Pat<zext>;
def : ZExt_i16_i1_Pat<anyext>;

// Sign-extend i1 -> i16: select 0 or -1 (all ones).
def : Pat <
  (i16 (sext i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src)
>;

} // End Predicates = [isVI]
| 457 | |
Valery Pykhtin | 355103f | 2016-09-23 09:08:07 +0000 | [diff] [blame] | 458 | //===----------------------------------------------------------------------===// |
| 459 | // SI |
| 460 | //===----------------------------------------------------------------------===// |
| 461 | |
let AssemblerPredicates = [isSICI], DecoderNamespace = "SICI" in {

// Plain VOP2 encoding; used by ops defined without an _e32 suffix
// (readlane/writelane).
multiclass VOP2_Real_si <bits<6> op> {
  def _si :
    VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.SI>,
    VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME).Pfl>;
}

// VOP2 word plus trailing 32-bit literal (v_madmk/v_madak).
multiclass VOP2_Real_MADK_si <bits<6> op> {
  def _si : VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.SI>,
            VOP2_MADKe<op{5-0}, !cast<VOP2_Pseudo>(NAME).Pfl>;
}

multiclass VOP2_Real_e32_si <bits<6> op> {
  def _e32_si :
    VOP2_Real<!cast<VOP2_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
    VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME#"_e32").Pfl>;
}

// _e32 plus the VOP3-encoded _e64 form; the VOP3 opcode is the 6-bit
// VOP2 opcode prefixed with {1, 0, 0}.
multiclass VOP2_Real_e32e64_si <bits<6> op> : VOP2_Real_e32_si<op> {
  def _e64_si :
    VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
    VOP3e_si <{1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
}

// Same, but the _e64 form uses the VOP3b encoding (for the carry ops).
multiclass VOP2be_Real_e32e64_si <bits<6> op> : VOP2_Real_e32_si<op> {
  def _e64_si :
    VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
    VOP3be_si <{1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
}

} // End AssemblerPredicates = [isSICI], DecoderNamespace = "SICI"
| 494 | |
// SI/CI opcode assignments.
defm V_CNDMASK_B32 : VOP2_Real_e32e64_si <0x0>;
defm V_ADD_F32 : VOP2_Real_e32e64_si <0x3>;
defm V_SUB_F32 : VOP2_Real_e32e64_si <0x4>;
defm V_SUBREV_F32 : VOP2_Real_e32e64_si <0x5>;
defm V_MUL_LEGACY_F32 : VOP2_Real_e32e64_si <0x7>;
defm V_MUL_F32 : VOP2_Real_e32e64_si <0x8>;
defm V_MUL_I32_I24 : VOP2_Real_e32e64_si <0x9>;
defm V_MUL_HI_I32_I24 : VOP2_Real_e32e64_si <0xa>;
defm V_MUL_U32_U24 : VOP2_Real_e32e64_si <0xb>;
defm V_MUL_HI_U32_U24 : VOP2_Real_e32e64_si <0xc>;
defm V_MIN_F32 : VOP2_Real_e32e64_si <0xf>;
defm V_MAX_F32 : VOP2_Real_e32e64_si <0x10>;
defm V_MIN_I32 : VOP2_Real_e32e64_si <0x11>;
defm V_MAX_I32 : VOP2_Real_e32e64_si <0x12>;
defm V_MIN_U32 : VOP2_Real_e32e64_si <0x13>;
defm V_MAX_U32 : VOP2_Real_e32e64_si <0x14>;
defm V_LSHRREV_B32 : VOP2_Real_e32e64_si <0x16>;
defm V_ASHRREV_I32 : VOP2_Real_e32e64_si <0x18>;
defm V_LSHLREV_B32 : VOP2_Real_e32e64_si <0x1a>;
defm V_AND_B32 : VOP2_Real_e32e64_si <0x1b>;
defm V_OR_B32 : VOP2_Real_e32e64_si <0x1c>;
defm V_XOR_B32 : VOP2_Real_e32e64_si <0x1d>;
defm V_MAC_F32 : VOP2_Real_e32e64_si <0x1f>;
defm V_MADMK_F32 : VOP2_Real_MADK_si <0x20>;
defm V_MADAK_F32 : VOP2_Real_MADK_si <0x21>;
// Carry ops use the VOP3b encoding for their _e64 forms.
defm V_ADD_I32 : VOP2be_Real_e32e64_si <0x25>;
defm V_SUB_I32 : VOP2be_Real_e32e64_si <0x26>;
defm V_SUBREV_I32 : VOP2be_Real_e32e64_si <0x27>;
defm V_ADDC_U32 : VOP2be_Real_e32e64_si <0x28>;
defm V_SUBB_U32 : VOP2be_Real_e32e64_si <0x29>;
defm V_SUBBREV_U32 : VOP2be_Real_e32e64_si <0x2a>;

defm V_READLANE_B32 : VOP2_Real_si <0x01>;
defm V_WRITELANE_B32 : VOP2_Real_si <0x02>;

// SI/CI-only instructions.
defm V_MAC_LEGACY_F32 : VOP2_Real_e32e64_si <0x6>;
defm V_MIN_LEGACY_F32 : VOP2_Real_e32e64_si <0xd>;
defm V_MAX_LEGACY_F32 : VOP2_Real_e32e64_si <0xe>;
defm V_LSHR_B32 : VOP2_Real_e32e64_si <0x15>;
defm V_ASHR_I32 : VOP2_Real_e32e64_si <0x17>;
defm V_LSHL_B32 : VOP2_Real_e32e64_si <0x19>;

defm V_BFM_B32 : VOP2_Real_e32e64_si <0x1e>;
defm V_BCNT_U32_B32 : VOP2_Real_e32e64_si <0x22>;
defm V_MBCNT_LO_U32_B32 : VOP2_Real_e32e64_si <0x23>;
defm V_MBCNT_HI_U32_B32 : VOP2_Real_e32e64_si <0x24>;
defm V_LDEXP_F32 : VOP2_Real_e32e64_si <0x2b>;
defm V_CVT_PKACCUM_U8_F32 : VOP2_Real_e32e64_si <0x2c>;
defm V_CVT_PKNORM_I16_F32 : VOP2_Real_e32e64_si <0x2d>;
defm V_CVT_PKNORM_U16_F32 : VOP2_Real_e32e64_si <0x2e>;
defm V_CVT_PKRTZ_F16_F32 : VOP2_Real_e32e64_si <0x2f>;
defm V_CVT_PK_U16_U32 : VOP2_Real_e32e64_si <0x30>;
defm V_CVT_PK_I16_I32 : VOP2_Real_e32e64_si <0x31>;
| 549 | |
| 550 | //===----------------------------------------------------------------------===// |
| 551 | // VI |
| 552 | //===----------------------------------------------------------------------===// |
| 553 | |
// SDWA variant of a VOP2 instruction: the src0 field of the base word is
// replaced by the magic value 0xf9 marking the SDWA form; the remaining
// SDWA operand/control fields come from VOP_SDWA. Defs/Uses/scheduling
// are inherited from the _e32 pseudo.
class VOP2_SDWA <bits<6> op, VOP2_Pseudo ps, VOPProfile P = ps.Pfl> :
  VOP_SDWA <ps.OpName, P> {
  let Defs = ps.Defs;
  let Uses = ps.Uses;
  let SchedRW = ps.SchedRW;
  let hasSideEffects = ps.hasSideEffects;
  let AsmMatchConverter = "cvtSdwaVOP2";

  bits<8> vdst;
  bits<8> src1;
  let Inst{8-0}   = 0xf9; // sdwa
  let Inst{16-9}  = !if(P.HasSrc1, src1{7-0}, 0);
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{30-25} = op;
  let Inst{31}    = 0x0; // encoding
}
| 570 | |
// DPP variant of a VOP2 instruction: like VOP2_SDWA above, but the src0
// field holds the DPP marker 0xfa; DPP control fields come from VOP_DPP.
class VOP2_DPP <bits<6> op, VOP2_Pseudo ps, VOPProfile P = ps.Pfl> :
  VOP_DPP <ps.OpName, P> {
  let Defs = ps.Defs;
  let Uses = ps.Uses;
  let SchedRW = ps.SchedRW;
  let hasSideEffects = ps.hasSideEffects;

  bits<8> vdst;
  bits<8> src1;
  let Inst{8-0}   = 0xfa; // dpp
  let Inst{16-9}  = !if(P.HasSrc1, src1{7-0}, 0);
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{30-25} = op;
  let Inst{31}    = 0x0; // encoding
}
| 586 | |
let AssemblerPredicates = [isVI], DecoderNamespace = "VI" in {

// VOP2 pseudo emitted with a VOP3 encoding and a 10-bit opcode
// (used for readlane/writelane on VI — see the defs below).
multiclass VOP32_Real_vi <bits<10> op> {
  def _vi :
    VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.VI>,
    VOP3e_vi<op, !cast<VOP2_Pseudo>(NAME).Pfl>;
}

// VOP2 word plus trailing 32-bit literal (v_madmk/v_madak).
multiclass VOP2_Real_MADK_vi <bits<6> op> {
  def _vi : VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP2_MADKe<op{5-0}, !cast<VOP2_Pseudo>(NAME).Pfl>;
}

multiclass VOP2_Real_e32_vi <bits<6> op> {
  def _e32_vi :
    VOP2_Real<!cast<VOP2_Pseudo>(NAME#"_e32"), SIEncodingFamily.VI>,
    VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME#"_e32").Pfl>;
}

multiclass VOP2_Real_e64_vi <bits<10> op> {
  def _e64_vi :
    VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
    VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
}

// _e32 plus a VOP3b-encoded _e64 form (for the carry in/out ops).
multiclass VOP2be_Real_e32e64_vi <bits<6> op> : VOP2_Real_e32_vi<op> {
  def _e64_vi :
    VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
    VOP3be_vi <{0, 1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
}

// On VI the VOP3 opcode for a VOP2 op is the 6-bit opcode prefixed
// with {0, 1, 0, 0}.
multiclass Base_VOP2_Real_e32e64_vi <bits<6> op> :
  VOP2_Real_e32_vi<op>,
  VOP2_Real_e64_vi<{0, 1, 0, 0, op{5-0}}>;

} // End AssemblerPredicates = [isVI], DecoderNamespace = "VI"
| 623 | |
// Base _e32/_e64 real forms plus SDWA and DPP variants.
multiclass VOP2_Real_e32e64_vi <bits<6> op> :
  Base_VOP2_Real_e32e64_vi<op> {
  // for now left sdwa/dpp only for asm/dasm
  // TODO: add corresponding pseudo
  def _sdwa : VOP2_SDWA<op, !cast<VOP2_Pseudo>(NAME#"_e32")>;
  def _dpp : VOP2_DPP<op, !cast<VOP2_Pseudo>(NAME#"_e32")>;
}
| 631 | |
// Core 32-bit VOP2 instructions, VI opcodes 0x0-0x16.
// V_CNDMASK_B32 gets no SDWA/DPP variants here, hence Base_*.
defm V_CNDMASK_B32        : Base_VOP2_Real_e32e64_vi <0x0>;
defm V_ADD_F32            : VOP2_Real_e32e64_vi <0x1>;
defm V_SUB_F32            : VOP2_Real_e32e64_vi <0x2>;
defm V_SUBREV_F32         : VOP2_Real_e32e64_vi <0x3>;
defm V_MUL_LEGACY_F32     : VOP2_Real_e32e64_vi <0x4>;
defm V_MUL_F32            : VOP2_Real_e32e64_vi <0x5>;
defm V_MUL_I32_I24        : VOP2_Real_e32e64_vi <0x6>;
defm V_MUL_HI_I32_I24     : VOP2_Real_e32e64_vi <0x7>;
defm V_MUL_U32_U24        : VOP2_Real_e32e64_vi <0x8>;
defm V_MUL_HI_U32_U24     : VOP2_Real_e32e64_vi <0x9>;
defm V_MIN_F32            : VOP2_Real_e32e64_vi <0xa>;
defm V_MAX_F32            : VOP2_Real_e32e64_vi <0xb>;
defm V_MIN_I32            : VOP2_Real_e32e64_vi <0xc>;
defm V_MAX_I32            : VOP2_Real_e32e64_vi <0xd>;
defm V_MIN_U32            : VOP2_Real_e32e64_vi <0xe>;
defm V_MAX_U32            : VOP2_Real_e32e64_vi <0xf>;
defm V_LSHRREV_B32        : VOP2_Real_e32e64_vi <0x10>;
defm V_ASHRREV_I32        : VOP2_Real_e32e64_vi <0x11>;
defm V_LSHLREV_B32        : VOP2_Real_e32e64_vi <0x12>;
defm V_AND_B32            : VOP2_Real_e32e64_vi <0x13>;
defm V_OR_B32             : VOP2_Real_e32e64_vi <0x14>;
defm V_XOR_B32            : VOP2_Real_e32e64_vi <0x15>;
defm V_MAC_F32            : VOP2_Real_e32e64_vi <0x16>;
// MADK forms (32-bit literal operand), opcodes 0x17-0x18.
defm V_MADMK_F32          : VOP2_Real_MADK_vi <0x17>;
defm V_MADAK_F32          : VOP2_Real_MADK_vi <0x18>;
// VOP2b carry in/out instructions, opcodes 0x19-0x1e (VOP3b e64 form).
defm V_ADD_I32            : VOP2be_Real_e32e64_vi <0x19>;
defm V_SUB_I32            : VOP2be_Real_e32e64_vi <0x1a>;
defm V_SUBREV_I32         : VOP2be_Real_e32e64_vi <0x1b>;
defm V_ADDC_U32           : VOP2be_Real_e32e64_vi <0x1c>;
defm V_SUBB_U32           : VOP2be_Real_e32e64_vi <0x1d>;
defm V_SUBBREV_U32        : VOP2be_Real_e32e64_vi <0x1e>;
| 663 | |
// Instructions that were VOP2 on SI but have only a VOP3 encoding on VI
// (10-bit opcodes in the 0x280+ range; see the SI2_VI3Alias defs below).
defm V_READLANE_B32       : VOP32_Real_vi <0x289>;
defm V_WRITELANE_B32      : VOP32_Real_vi <0x28a>;

defm V_BFM_B32            : VOP2_Real_e64_vi <0x293>;
defm V_BCNT_U32_B32       : VOP2_Real_e64_vi <0x28b>;
defm V_MBCNT_LO_U32_B32   : VOP2_Real_e64_vi <0x28c>;
defm V_MBCNT_HI_U32_B32   : VOP2_Real_e64_vi <0x28d>;
defm V_LDEXP_F32          : VOP2_Real_e64_vi <0x288>;
defm V_CVT_PKACCUM_U8_F32 : VOP2_Real_e64_vi <0x1f0>;
defm V_CVT_PKNORM_I16_F32 : VOP2_Real_e64_vi <0x294>;
defm V_CVT_PKNORM_U16_F32 : VOP2_Real_e64_vi <0x295>;
defm V_CVT_PKRTZ_F16_F32  : VOP2_Real_e64_vi <0x296>;
defm V_CVT_PK_U16_U32     : VOP2_Real_e64_vi <0x297>;
defm V_CVT_PK_I16_I32     : VOP2_Real_e64_vi <0x298>;
| 678 | |
// 16-bit (f16/i16/u16) VOP2 instructions introduced on VI,
// opcodes 0x1f-0x33.
defm V_ADD_F16            : VOP2_Real_e32e64_vi <0x1f>;
defm V_SUB_F16            : VOP2_Real_e32e64_vi <0x20>;
defm V_SUBREV_F16         : VOP2_Real_e32e64_vi <0x21>;
defm V_MUL_F16            : VOP2_Real_e32e64_vi <0x22>;
defm V_MAC_F16            : VOP2_Real_e32e64_vi <0x23>;
defm V_MADMK_F16          : VOP2_Real_MADK_vi <0x24>;
defm V_MADAK_F16          : VOP2_Real_MADK_vi <0x25>;
defm V_ADD_U16            : VOP2_Real_e32e64_vi <0x26>;
defm V_SUB_U16            : VOP2_Real_e32e64_vi <0x27>;
defm V_SUBREV_U16         : VOP2_Real_e32e64_vi <0x28>;
defm V_MUL_LO_U16         : VOP2_Real_e32e64_vi <0x29>;
defm V_LSHLREV_B16        : VOP2_Real_e32e64_vi <0x2a>;
defm V_LSHRREV_B16        : VOP2_Real_e32e64_vi <0x2b>;
defm V_ASHRREV_B16        : VOP2_Real_e32e64_vi <0x2c>;
defm V_MAX_F16            : VOP2_Real_e32e64_vi <0x2d>;
defm V_MIN_F16            : VOP2_Real_e32e64_vi <0x2e>;
defm V_MAX_U16            : VOP2_Real_e32e64_vi <0x2f>;
defm V_MAX_I16            : VOP2_Real_e32e64_vi <0x30>;
defm V_MIN_U16            : VOP2_Real_e32e64_vi <0x31>;
defm V_MIN_I16            : VOP2_Real_e32e64_vi <0x32>;
defm V_LDEXP_F16          : VOP2_Real_e32e64_vi <0x33>;
| 700 | |
let SubtargetPredicate = isVI in {

// Aliases to simplify matching of floating-point instructions that
// are VOP2 on SI and VOP3 on VI: the two-source VOP2 syntax
// "op $dst, $src0, $src1" is mapped onto the VOP3 real instruction with
// all modifier operands (neg/abs, omod, clamp) hardwired to 0.
class SI2_VI3Alias <string name, Instruction inst> : InstAlias <
  name#" $dst, $src0, $src1",
  (inst VGPR_32:$dst, 0, VCSrc_f32:$src0, 0, VCSrc_f32:$src1, 0, 0)
>, PredicateControl {
  // Bypass the asm-matcher conversion hook and match in the VOP3
  // assembler variant, since the target instruction is VOP3-encoded.
  let UseInstAsmMatchConverter = 0;
  let AsmVariantName = AMDGPUAsmVariants.VOP3;
}

def : SI2_VI3Alias <"v_ldexp_f32", V_LDEXP_F32_e64_vi>;
def : SI2_VI3Alias <"v_cvt_pkaccum_u8_f32", V_CVT_PKACCUM_U8_F32_e64_vi>;
def : SI2_VI3Alias <"v_cvt_pknorm_i16_f32", V_CVT_PKNORM_I16_F32_e64_vi>;
def : SI2_VI3Alias <"v_cvt_pknorm_u16_f32", V_CVT_PKNORM_U16_F32_e64_vi>;
def : SI2_VI3Alias <"v_cvt_pkrtz_f16_f32", V_CVT_PKRTZ_F16_F32_e64_vi>;

} // End SubtargetPredicate = isVI