//===-- VOP1Instructions.td - Vector Instruction Definitions --------------===//
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | |
| 10 | //===----------------------------------------------------------------------===// |
| 11 | // VOP1 Classes |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
// 32-bit VOP1 instruction encoding:
//   bits  8-0  : src0 (9 bits, so it can address SGPRs/inline constants)
//   bits 16-9  : 8-bit opcode
//   bits 24-17 : vdst (destination VGPR)
//   bits 31-25 : fixed VOP1 encoding tag (0x3f)
// Fields the profile says are absent are encoded as 0.
class VOP1e <bits<8> op, VOPProfile P> : Enc32 {
  bits<8> vdst;
  bits<9> src0;

  let Inst{8-0} = !if(P.HasSrc0, src0{8-0}, 0);
  let Inst{16-9} = op;
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{31-25} = 0x3f; //encoding
}
| 23 | |
// Encoding-independent VOP1 pseudo instruction used during codegen.
// Subtarget-specific "real" encodings (SI/CI/VI) are derived from it via
// VOP1_Real, which matches on the opName#"_e32" SIMCInstr key.
class VOP1_Pseudo <string opName, VOPProfile P, list<dag> pattern=[]> :
  InstSI <P.Outs32, P.Ins32, "", pattern>,
  VOP <opName>,
  SIMCInstr <opName#"_e32", SIEncodingFamily.NONE>,
  MnemonicAlias<opName#"_e32", opName> {

  let isPseudo = 1;
  let isCodeGenOnly = 1;
  let UseNamedOperandTable = 1;

  string Mnemonic = opName;
  string AsmOperands = P.Asm32;

  let Size = 4;
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let SubtargetPredicate = isGCN;

  let VOP1 = 1;
  let VALU = 1;
  // VALU instructions execute under the EXEC mask.
  let Uses = [EXEC];

  let AsmVariantName = AMDGPUAsmVariants.Default;

  // Keep the profile around so Real/SDWA/DPP encodings can query it.
  VOPProfile Pfl = P;
}
| 51 | |
// Subtarget-specific "real" instruction derived from a VOP1 pseudo.
// The actual encoding bits are mixed in by a sibling Enc32-based class
// (e.g. VOP1e); this class only carries the MC-level properties over
// from the pseudo so codegen flags stay in sync.
class VOP1_Real <VOP1_Pseudo ps, int EncodingFamily> :
  InstSI <ps.OutOperandList, ps.InOperandList, ps.Mnemonic # ps.AsmOperands, []>,
  SIMCInstr <ps.PseudoInstr, EncodingFamily> {

  let isPseudo = 0;
  let isCodeGenOnly = 0;

  // copy relevant pseudo op flags
  let SubtargetPredicate = ps.SubtargetPredicate;
  let AsmMatchConverter = ps.AsmMatchConverter;
  let AsmVariantName = ps.AsmVariantName;
  let Constraints = ps.Constraints;
  let DisableEncoding = ps.DisableEncoding;
  let TSFlags = ps.TSFlags;
}
| 67 | |
// Builds the selection pattern for the VOP3 (64-bit) form of a unary op:
// when the profile has source modifiers, match src0 wrapped in VOP3Mods0
// (src0_modifiers, clamp, omod operands); otherwise match the plain node.
class getVOP1Pat64 <SDPatternOperator node, VOPProfile P> : LetDummies {
  list<dag> ret = !if(P.HasModifiers,
      [(set P.DstVT:$vdst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
                                  i32:$src0_modifiers, i1:$clamp, i32:$omod))))],
      [(set P.DstVT:$vdst, (node P.Src0VT:$src0))]);
}
| 74 | |
// Standard VOP1 pseudo pair: the 32-bit encoding (_e32, no pattern) and
// the VOP3 64-bit encoding (_e64), which carries the selection pattern.
multiclass VOP1Inst <string opName, VOPProfile P,
                     SDPatternOperator node = null_frag> {
  def _e32 : VOP1_Pseudo <opName, P>;
  def _e64 : VOP3_Pseudo <opName, P, getVOP1Pat64<node, P>.ret>;
}
| 80 | |
| 81 | //===----------------------------------------------------------------------===// |
| 82 | // VOP1 Instructions |
| 83 | //===----------------------------------------------------------------------===// |
| 84 | |
let VOPAsmPrefer32Bit = 1 in {
defm V_NOP : VOP1Inst <"v_nop", VOP_NONE>;
}

let isMoveImm = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in {
defm V_MOV_B32 : VOP1Inst <"v_mov_b32", VOP_I32_I32>;
} // End isMoveImm = 1, isReMaterializable = 1, isAsCheapAsAMove = 1
| 92 | |
// FIXME: Specify SchedRW for READFIRSTLANE_B32
// TODO: Make profile for this, there is VOP3 encoding also
// Defined directly (not through VOP1Inst) because it is the one VOP1 op
// with an SGPR destination, which the standard profiles don't express.
// Encoding is hand-written below with the fixed VOP1 opcode 0x2.
def V_READFIRSTLANE_B32 :
  InstSI <(outs SReg_32:$vdst),
    (ins VGPR_32:$src0),
    "v_readfirstlane_b32 $vdst, $src0",
    [(set i32:$vdst, (int_amdgcn_readfirstlane i32:$src0))]>,
  Enc32 {

  let isCodeGenOnly = 0;
  let UseNamedOperandTable = 1;

  let Size = 4;
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let SubtargetPredicate = isGCN;

  let VOP1 = 1;
  let VALU = 1;
  let Uses = [EXEC];
  // Cross-lane read: result must not vary if lanes are re-converged.
  let isConvergent = 1;

  bits<8> vdst;
  bits<9> src0;

  let Inst{8-0} = src0;
  let Inst{16-9} = 0x2;
  let Inst{24-17} = vdst;
  let Inst{31-25} = 0x3f; //encoding
}
| 124 | |
// Conversions. NOTE: v_cvt_f16_f32 / v_cvt_f32_f16 model the f16 value as
// an i32 at this point (profiles VOP_I32_F32 / VOP_F32_I32).
let SchedRW = [WriteQuarterRate32] in {
defm V_CVT_I32_F64 : VOP1Inst <"v_cvt_i32_f64", VOP_I32_F64, fp_to_sint>;
defm V_CVT_F64_I32 : VOP1Inst <"v_cvt_f64_i32", VOP_F64_I32, sint_to_fp>;
defm V_CVT_F32_I32 : VOP1Inst <"v_cvt_f32_i32", VOP_F32_I32, sint_to_fp>;
defm V_CVT_F32_U32 : VOP1Inst <"v_cvt_f32_u32", VOP_F32_I32, uint_to_fp>;
defm V_CVT_U32_F32 : VOP1Inst <"v_cvt_u32_f32", VOP_I32_F32, fp_to_uint>;
defm V_CVT_I32_F32 : VOP1Inst <"v_cvt_i32_f32", VOP_I32_F32, fp_to_sint>;
defm V_CVT_F16_F32 : VOP1Inst <"v_cvt_f16_f32", VOP_I32_F32, fp_to_f16>;
defm V_CVT_F32_F16 : VOP1Inst <"v_cvt_f32_f16", VOP_F32_I32, f16_to_fp>;
defm V_CVT_RPI_I32_F32 : VOP1Inst <"v_cvt_rpi_i32_f32", VOP_I32_F32, cvt_rpi_i32_f32>;
defm V_CVT_FLR_I32_F32 : VOP1Inst <"v_cvt_flr_i32_f32", VOP_I32_F32, cvt_flr_i32_f32>;
defm V_CVT_OFF_F32_I4 : VOP1Inst <"v_cvt_off_f32_i4", VOP_F32_I32>;
defm V_CVT_F32_F64 : VOP1Inst <"v_cvt_f32_f64", VOP_F32_F64, fpround>;
defm V_CVT_F64_F32 : VOP1Inst <"v_cvt_f64_f32", VOP_F64_F32, fpextend>;
defm V_CVT_F32_UBYTE0 : VOP1Inst <"v_cvt_f32_ubyte0", VOP_F32_I32, AMDGPUcvt_f32_ubyte0>;
defm V_CVT_F32_UBYTE1 : VOP1Inst <"v_cvt_f32_ubyte1", VOP_F32_I32, AMDGPUcvt_f32_ubyte1>;
defm V_CVT_F32_UBYTE2 : VOP1Inst <"v_cvt_f32_ubyte2", VOP_F32_I32, AMDGPUcvt_f32_ubyte2>;
defm V_CVT_F32_UBYTE3 : VOP1Inst <"v_cvt_f32_ubyte3", VOP_F32_I32, AMDGPUcvt_f32_ubyte3>;
defm V_CVT_U32_F64 : VOP1Inst <"v_cvt_u32_f64", VOP_I32_F64, fp_to_uint>;
defm V_CVT_F64_U32 : VOP1Inst <"v_cvt_f64_u32", VOP_F64_I32, uint_to_fp>;
} // End SchedRW = [WriteQuarterRate32]

// Single-rate f32 rounding / exponent ops.
defm V_FRACT_F32 : VOP1Inst <"v_fract_f32", VOP_F32_F32, AMDGPUfract>;
defm V_TRUNC_F32 : VOP1Inst <"v_trunc_f32", VOP_F32_F32, ftrunc>;
defm V_CEIL_F32 : VOP1Inst <"v_ceil_f32", VOP_F32_F32, fceil>;
defm V_RNDNE_F32 : VOP1Inst <"v_rndne_f32", VOP_F32_F32, frint>;
defm V_FLOOR_F32 : VOP1Inst <"v_floor_f32", VOP_F32_F32, ffloor>;
defm V_EXP_F32 : VOP1Inst <"v_exp_f32", VOP_F32_F32, fexp2>;

// Transcendental f32 ops (quarter rate).
let SchedRW = [WriteQuarterRate32] in {
defm V_LOG_F32 : VOP1Inst <"v_log_f32", VOP_F32_F32, flog2>;
defm V_RCP_F32 : VOP1Inst <"v_rcp_f32", VOP_F32_F32, AMDGPUrcp>;
defm V_RCP_IFLAG_F32 : VOP1Inst <"v_rcp_iflag_f32", VOP_F32_F32>;
defm V_RSQ_F32 : VOP1Inst <"v_rsq_f32", VOP_F32_F32, AMDGPUrsq>;
} // End SchedRW = [WriteQuarterRate32]

let SchedRW = [WriteDouble] in {
defm V_RCP_F64 : VOP1Inst <"v_rcp_f64", VOP_F64_F64, AMDGPUrcp>;
defm V_RSQ_F64 : VOP1Inst <"v_rsq_f64", VOP_F64_F64, AMDGPUrsq>;
} // End SchedRW = [WriteDouble]

defm V_SQRT_F32 : VOP1Inst <"v_sqrt_f32", VOP_F32_F32, fsqrt>;

let SchedRW = [WriteDouble] in {
defm V_SQRT_F64 : VOP1Inst <"v_sqrt_f64", VOP_F64_F64, fsqrt>;
} // End SchedRW = [WriteDouble]

let SchedRW = [WriteQuarterRate32] in {
defm V_SIN_F32 : VOP1Inst <"v_sin_f32", VOP_F32_F32, AMDGPUsin>;
defm V_COS_F32 : VOP1Inst <"v_cos_f32", VOP_F32_F32, AMDGPUcos>;
} // End SchedRW = [WriteQuarterRate32]

// Integer bit-manipulation ops.
defm V_NOT_B32 : VOP1Inst <"v_not_b32", VOP_I32_I32>;
defm V_BFREV_B32 : VOP1Inst <"v_bfrev_b32", VOP_I32_I32>;
defm V_FFBH_U32 : VOP1Inst <"v_ffbh_u32", VOP_I32_I32>;
defm V_FFBL_B32 : VOP1Inst <"v_ffbl_b32", VOP_I32_I32>;
defm V_FFBH_I32 : VOP1Inst <"v_ffbh_i32", VOP_I32_I32>;
defm V_FREXP_EXP_I32_F64 : VOP1Inst <"v_frexp_exp_i32_f64", VOP_I32_F64, int_amdgcn_frexp_exp>;

let SchedRW = [WriteDoubleAdd] in {
defm V_FREXP_MANT_F64 : VOP1Inst <"v_frexp_mant_f64", VOP_F64_F64, int_amdgcn_frexp_mant>;
defm V_FRACT_F64 : VOP1Inst <"v_fract_f64", VOP_F64_F64, AMDGPUfract>;
} // End SchedRW = [WriteDoubleAdd]

defm V_FREXP_EXP_I32_F32 : VOP1Inst <"v_frexp_exp_i32_f32", VOP_I32_F32, int_amdgcn_frexp_exp>;
defm V_FREXP_MANT_F32 : VOP1Inst <"v_frexp_mant_f32", VOP_F32_F32, int_amdgcn_frexp_mant>;

let VOPAsmPrefer32Bit = 1 in {
defm V_CLREXCP : VOP1Inst <"v_clrexcp", VOP_NO_EXT<VOP_NONE>>;
}
| 195 | |
// Restrict src0 to be VGPR
def VOP_I32_VI32_NO_EXT : VOPProfile<[i32, i32, untyped, untyped]> {
  let Src0RC32 = VRegSrc_32;
  let Src0RC64 = VRegSrc_32;

  // No SDWA/DPP variants for this profile.
  let HasExt = 0;
}
| 203 | |
// Special case because there are no true output operands. Hack vdst
// to be a src operand. The custom inserter must add a tied implicit
// def and use of the super register since there seems to be no way to
// add an implicit def of a virtual register in tablegen.
def VOP_MOVRELD : VOPProfile<[untyped, i32, untyped, untyped]> {
  // $vdst appears in the input list, but is registered with the
  // destination operand class so it still encodes into the vdst field.
  let Src0RC32 = VOPDstOperand<VGPR_32>;
  let Src0RC64 = VOPDstOperand<VGPR_32>;

  let Outs = (outs);
  let Ins32 = (ins Src0RC32:$vdst, VSrc_b32:$src0);
  let Ins64 = (ins Src0RC64:$vdst, VSrc_b32:$src0);

  let InsDPP = (ins Src0RC32:$vdst, Src0RC32:$src0, dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
                    bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
  let InsSDWA = (ins Src0RC32:$vdst, Int32InputMods:$src0_modifiers, VCSrc_b32:$src0,
                     clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
                     src0_sel:$src0_sel);

  let Asm32 = getAsm32<1, 1>.ret;
  let Asm64 = getAsm64<1, 1, 0>.ret;
  let AsmDPP = getAsmDPP<1, 1, 0>.ret;
  let AsmSDWA = getAsmSDWA<1, 1, 0>.ret;

  let HasExt = 0;
  let HasDst = 0;
  let EmitDst = 1; // force vdst emission
}
| 231 | |
// The movrel family reads the index to add to the register number from M0.
let Uses = [M0, EXEC] in {
// v_movreld_b32 is a special case because the destination output
// register is really a source. It isn't actually read (but may be
// written), and is only to provide the base register to start
// indexing from. Tablegen seems to not let you define an implicit
// virtual register output for the super register being written into,
// so this must have an implicit def of the register added to it.
defm V_MOVRELD_B32 : VOP1Inst <"v_movreld_b32", VOP_MOVRELD>;
defm V_MOVRELS_B32 : VOP1Inst <"v_movrels_b32", VOP_I32_VI32_NO_EXT>;
defm V_MOVRELSD_B32 : VOP1Inst <"v_movrelsd_b32", VOP_NO_EXT<VOP_I32_I32>>;
} // End Uses = [M0, EXEC]
| 243 | |
// These instructions only exist on SI and CI
let SubtargetPredicate = isSICI in {

let SchedRW = [WriteQuarterRate32] in {
defm V_MOV_FED_B32 : VOP1Inst <"v_mov_fed_b32", VOP_I32_I32>;
defm V_LOG_CLAMP_F32 : VOP1Inst <"v_log_clamp_f32", VOP_F32_F32, int_amdgcn_log_clamp>;
defm V_RCP_CLAMP_F32 : VOP1Inst <"v_rcp_clamp_f32", VOP_F32_F32>;
defm V_RCP_LEGACY_F32 : VOP1Inst <"v_rcp_legacy_f32", VOP_F32_F32, AMDGPUrcp_legacy>;
defm V_RSQ_CLAMP_F32 : VOP1Inst <"v_rsq_clamp_f32", VOP_F32_F32, AMDGPUrsq_clamp>;
defm V_RSQ_LEGACY_F32 : VOP1Inst <"v_rsq_legacy_f32", VOP_F32_F32, AMDGPUrsq_legacy>;
} // End SchedRW = [WriteQuarterRate32]

let SchedRW = [WriteDouble] in {
defm V_RCP_CLAMP_F64 : VOP1Inst <"v_rcp_clamp_f64", VOP_F64_F64>;
defm V_RSQ_CLAMP_F64 : VOP1Inst <"v_rsq_clamp_f64", VOP_F64_F64, AMDGPUrsq_clamp>;
} // End SchedRW = [WriteDouble]

} // End SubtargetPredicate = isSICI
| 262 | |
| 263 | |
// These instructions exist on CI and VI (added after SI).
let SubtargetPredicate = isCIVI in {

let SchedRW = [WriteDoubleAdd] in {
defm V_TRUNC_F64 : VOP1Inst <"v_trunc_f64", VOP_F64_F64, ftrunc>;
defm V_CEIL_F64 : VOP1Inst <"v_ceil_f64", VOP_F64_F64, fceil>;
defm V_FLOOR_F64 : VOP1Inst <"v_floor_f64", VOP_F64_F64, ffloor>;
defm V_RNDNE_F64 : VOP1Inst <"v_rndne_f64", VOP_F64_F64, frint>;
} // End SchedRW = [WriteDoubleAdd]

let SchedRW = [WriteQuarterRate32] in {
defm V_LOG_LEGACY_F32 : VOP1Inst <"v_log_legacy_f32", VOP_F32_F32>;
defm V_EXP_LEGACY_F32 : VOP1Inst <"v_exp_legacy_f32", VOP_F32_F32>;
} // End SchedRW = [WriteQuarterRate32]

} // End SubtargetPredicate = isCIVI
| 279 | |
| 280 | |
// True f16 instructions, introduced with VI.
let SubtargetPredicate = isVI in {

defm V_CVT_F16_U16 : VOP1Inst <"v_cvt_f16_u16", VOP_F16_I16>;
defm V_CVT_F16_I16 : VOP1Inst <"v_cvt_f16_i16", VOP_F16_I16>;
defm V_CVT_U16_F16 : VOP1Inst <"v_cvt_u16_f16", VOP_I16_F16>;
defm V_CVT_I16_F16 : VOP1Inst <"v_cvt_i16_f16", VOP_I16_F16>;
defm V_RCP_F16 : VOP1Inst <"v_rcp_f16", VOP_F16_F16>;
defm V_SQRT_F16 : VOP1Inst <"v_sqrt_f16", VOP_F16_F16>;
defm V_RSQ_F16 : VOP1Inst <"v_rsq_f16", VOP_F16_F16>;
defm V_LOG_F16 : VOP1Inst <"v_log_f16", VOP_F16_F16>;
defm V_EXP_F16 : VOP1Inst <"v_exp_f16", VOP_F16_F16>;
defm V_FREXP_MANT_F16 : VOP1Inst <"v_frexp_mant_f16", VOP_F16_F16>;
defm V_FREXP_EXP_I16_F16 : VOP1Inst <"v_frexp_exp_i16_f16", VOP_I16_F16>;
defm V_FLOOR_F16 : VOP1Inst <"v_floor_f16", VOP_F16_F16>;
defm V_CEIL_F16 : VOP1Inst <"v_ceil_f16", VOP_F16_F16>;
defm V_TRUNC_F16 : VOP1Inst <"v_trunc_f16", VOP_F16_F16>;
defm V_RNDNE_F16 : VOP1Inst <"v_rndne_f16", VOP_F16_F16>;
defm V_FRACT_F16 : VOP1Inst <"v_fract_f16", VOP_F16_F16>;
defm V_SIN_F16 : VOP1Inst <"v_sin_f16", VOP_F16_F16>;
defm V_COS_F16 : VOP1Inst <"v_cos_f16", VOP_F16_F16>;

} // End SubtargetPredicate = isVI
| 303 | |
| 304 | //===----------------------------------------------------------------------===// |
| 305 | // Target |
| 306 | //===----------------------------------------------------------------------===// |
| 307 | |
| 308 | //===----------------------------------------------------------------------===// |
| 309 | // SI |
| 310 | //===----------------------------------------------------------------------===// |
| 311 | |
// Instantiates the SI real encodings for a VOP1 pseudo pair: the 32-bit
// VOP1e form and the 64-bit VOP3 form ({1, 1, op} places the VOP1 opcode
// into the SI VOP3 opcode space).
multiclass VOP1_Real_si <bits<9> op> {
  let AssemblerPredicates = [isSICI], DecoderNamespace = "SICI" in {
    def _e32_si :
      VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
      VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
    def _e64_si :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3e_si <{1, 1, op{6-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
}
| 322 | |
// SI opcode assignments.
defm V_NOP : VOP1_Real_si <0x0>;
defm V_MOV_B32 : VOP1_Real_si <0x1>;
defm V_CVT_I32_F64 : VOP1_Real_si <0x3>;
defm V_CVT_F64_I32 : VOP1_Real_si <0x4>;
defm V_CVT_F32_I32 : VOP1_Real_si <0x5>;
defm V_CVT_F32_U32 : VOP1_Real_si <0x6>;
defm V_CVT_U32_F32 : VOP1_Real_si <0x7>;
defm V_CVT_I32_F32 : VOP1_Real_si <0x8>;
defm V_MOV_FED_B32 : VOP1_Real_si <0x9>;
defm V_CVT_F16_F32 : VOP1_Real_si <0xa>;
defm V_CVT_F32_F16 : VOP1_Real_si <0xb>;
defm V_CVT_RPI_I32_F32 : VOP1_Real_si <0xc>;
defm V_CVT_FLR_I32_F32 : VOP1_Real_si <0xd>;
defm V_CVT_OFF_F32_I4 : VOP1_Real_si <0xe>;
defm V_CVT_F32_F64 : VOP1_Real_si <0xf>;
defm V_CVT_F64_F32 : VOP1_Real_si <0x10>;
defm V_CVT_F32_UBYTE0 : VOP1_Real_si <0x11>;
defm V_CVT_F32_UBYTE1 : VOP1_Real_si <0x12>;
defm V_CVT_F32_UBYTE2 : VOP1_Real_si <0x13>;
defm V_CVT_F32_UBYTE3 : VOP1_Real_si <0x14>;
defm V_CVT_U32_F64 : VOP1_Real_si <0x15>;
defm V_CVT_F64_U32 : VOP1_Real_si <0x16>;
defm V_FRACT_F32 : VOP1_Real_si <0x20>;
defm V_TRUNC_F32 : VOP1_Real_si <0x21>;
defm V_CEIL_F32 : VOP1_Real_si <0x22>;
defm V_RNDNE_F32 : VOP1_Real_si <0x23>;
defm V_FLOOR_F32 : VOP1_Real_si <0x24>;
defm V_EXP_F32 : VOP1_Real_si <0x25>;
defm V_LOG_CLAMP_F32 : VOP1_Real_si <0x26>;
defm V_LOG_F32 : VOP1_Real_si <0x27>;
defm V_RCP_CLAMP_F32 : VOP1_Real_si <0x28>;
defm V_RCP_LEGACY_F32 : VOP1_Real_si <0x29>;
defm V_RCP_F32 : VOP1_Real_si <0x2a>;
defm V_RCP_IFLAG_F32 : VOP1_Real_si <0x2b>;
defm V_RSQ_CLAMP_F32 : VOP1_Real_si <0x2c>;
defm V_RSQ_LEGACY_F32 : VOP1_Real_si <0x2d>;
defm V_RSQ_F32 : VOP1_Real_si <0x2e>;
defm V_RCP_F64 : VOP1_Real_si <0x2f>;
defm V_RCP_CLAMP_F64 : VOP1_Real_si <0x30>;
defm V_RSQ_F64 : VOP1_Real_si <0x31>;
defm V_RSQ_CLAMP_F64 : VOP1_Real_si <0x32>;
defm V_SQRT_F32 : VOP1_Real_si <0x33>;
defm V_SQRT_F64 : VOP1_Real_si <0x34>;
defm V_SIN_F32 : VOP1_Real_si <0x35>;
defm V_COS_F32 : VOP1_Real_si <0x36>;
defm V_NOT_B32 : VOP1_Real_si <0x37>;
defm V_BFREV_B32 : VOP1_Real_si <0x38>;
defm V_FFBH_U32 : VOP1_Real_si <0x39>;
defm V_FFBL_B32 : VOP1_Real_si <0x3a>;
defm V_FFBH_I32 : VOP1_Real_si <0x3b>;
defm V_FREXP_EXP_I32_F64 : VOP1_Real_si <0x3c>;
defm V_FREXP_MANT_F64 : VOP1_Real_si <0x3d>;
defm V_FRACT_F64 : VOP1_Real_si <0x3e>;
defm V_FREXP_EXP_I32_F32 : VOP1_Real_si <0x3f>;
defm V_FREXP_MANT_F32 : VOP1_Real_si <0x40>;
defm V_CLREXCP : VOP1_Real_si <0x41>;
defm V_MOVRELD_B32 : VOP1_Real_si <0x42>;
defm V_MOVRELS_B32 : VOP1_Real_si <0x43>;
defm V_MOVRELSD_B32 : VOP1_Real_si <0x44>;
| 382 | |
| 383 | //===----------------------------------------------------------------------===// |
| 384 | // CI |
| 385 | //===----------------------------------------------------------------------===// |
| 386 | |
// CI-only real encodings. CI reuses SIEncodingFamily.SI because it shares
// the SI encoding layout; only the assembler predicate / decoder namespace
// restrict these defs to CI.
multiclass VOP1_Real_ci <bits<9> op> {
  let AssemblerPredicates = [isCIOnly], DecoderNamespace = "CI" in {
    def _e32_ci :
      VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
      VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
    def _e64_ci :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3e_si <{1, 1, op{6-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
}
| 397 | |
// CI opcode assignments.
defm V_TRUNC_F64 : VOP1_Real_ci <0x17>;
defm V_CEIL_F64 : VOP1_Real_ci <0x18>;
defm V_FLOOR_F64 : VOP1_Real_ci <0x1A>;
defm V_RNDNE_F64 : VOP1_Real_ci <0x19>;
defm V_LOG_LEGACY_F32 : VOP1_Real_ci <0x45>;
defm V_EXP_LEGACY_F32 : VOP1_Real_ci <0x46>;
| 404 | |
| 405 | //===----------------------------------------------------------------------===// |
| 406 | // VI |
| 407 | //===----------------------------------------------------------------------===// |
| 408 | |
// SDWA (sub-dword addressing) variant of a VOP1 instruction. The src0
// field carries the fixed SDWA marker 0xf9; codegen flags are copied
// from the parent pseudo.
class VOP1_SDWA <bits<8> op, VOP1_Pseudo ps, VOPProfile P = ps.Pfl> :
  VOP_SDWA <ps.OpName, P> {
  // copy relevant pseudo op flags
  let Defs = ps.Defs;
  let Uses = ps.Uses;
  let SchedRW = ps.SchedRW;
  let hasSideEffects = ps.hasSideEffects;
  let AsmMatchConverter = "cvtSdwaVOP1";

  bits<8> vdst;
  let Inst{8-0} = 0xf9; // sdwa
  let Inst{16-9} = op;
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{31-25} = 0x3f; // encoding
}
| 423 | |
// DPP (data-parallel primitives) variant of a VOP1 instruction. The src0
// field carries the fixed DPP marker 0xfa; codegen flags are copied from
// the parent pseudo.
class VOP1_DPP <bits<8> op, VOP1_Pseudo ps, VOPProfile P = ps.Pfl> :
  VOP_DPP <ps.OpName, P> {
  // copy relevant pseudo op flags
  let Defs = ps.Defs;
  let Uses = ps.Uses;
  let SchedRW = ps.SchedRW;
  let hasSideEffects = ps.hasSideEffects;

  bits<8> vdst;
  let Inst{8-0} = 0xfa; // dpp
  let Inst{16-9} = op;
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{31-25} = 0x3f; //encoding
}
| 437 | |
// Instantiates the VI real encodings for a VOP1 pseudo pair. VI VOP3
// opcodes for VOP1 ops are offset by 0x140, hence the 10-bit op parameter.
multiclass VOP1_Real_vi <bits<10> op> {
  let AssemblerPredicates = [isVI], DecoderNamespace = "VI" in {
    def _e32_vi :
      VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.VI>,
      VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
    def _e64_vi :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
      VOP3e_vi <!add(0x140, op), !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }

  // for now left sdwa/dpp only for asm/dasm
  // TODO: add corresponding pseudo
  def _sdwa : VOP1_SDWA<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32")>;
  def _dpp : VOP1_DPP<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32")>;
}
| 453 | |
// VI opcode assignments. Note the VI numbering diverges from SI starting
// at V_FRACT_F32 (0x1b here vs 0x20 on SI).
defm V_NOP : VOP1_Real_vi <0x0>;
defm V_MOV_B32 : VOP1_Real_vi <0x1>;
defm V_CVT_I32_F64 : VOP1_Real_vi <0x3>;
defm V_CVT_F64_I32 : VOP1_Real_vi <0x4>;
defm V_CVT_F32_I32 : VOP1_Real_vi <0x5>;
defm V_CVT_F32_U32 : VOP1_Real_vi <0x6>;
defm V_CVT_U32_F32 : VOP1_Real_vi <0x7>;
defm V_CVT_I32_F32 : VOP1_Real_vi <0x8>;
defm V_CVT_F16_F32 : VOP1_Real_vi <0xa>;
defm V_CVT_F32_F16 : VOP1_Real_vi <0xb>;
defm V_CVT_RPI_I32_F32 : VOP1_Real_vi <0xc>;
defm V_CVT_FLR_I32_F32 : VOP1_Real_vi <0xd>;
defm V_CVT_OFF_F32_I4 : VOP1_Real_vi <0xe>;
defm V_CVT_F32_F64 : VOP1_Real_vi <0xf>;
defm V_CVT_F64_F32 : VOP1_Real_vi <0x10>;
defm V_CVT_F32_UBYTE0 : VOP1_Real_vi <0x11>;
defm V_CVT_F32_UBYTE1 : VOP1_Real_vi <0x12>;
defm V_CVT_F32_UBYTE2 : VOP1_Real_vi <0x13>;
defm V_CVT_F32_UBYTE3 : VOP1_Real_vi <0x14>;
defm V_CVT_U32_F64 : VOP1_Real_vi <0x15>;
defm V_CVT_F64_U32 : VOP1_Real_vi <0x16>;
defm V_FRACT_F32 : VOP1_Real_vi <0x1b>;
defm V_TRUNC_F32 : VOP1_Real_vi <0x1c>;
defm V_CEIL_F32 : VOP1_Real_vi <0x1d>;
defm V_RNDNE_F32 : VOP1_Real_vi <0x1e>;
defm V_FLOOR_F32 : VOP1_Real_vi <0x1f>;
defm V_EXP_F32 : VOP1_Real_vi <0x20>;
defm V_LOG_F32 : VOP1_Real_vi <0x21>;
defm V_RCP_F32 : VOP1_Real_vi <0x22>;
defm V_RCP_IFLAG_F32 : VOP1_Real_vi <0x23>;
defm V_RSQ_F32 : VOP1_Real_vi <0x24>;
defm V_RCP_F64 : VOP1_Real_vi <0x25>;
defm V_RSQ_F64 : VOP1_Real_vi <0x26>;
defm V_SQRT_F32 : VOP1_Real_vi <0x27>;
defm V_SQRT_F64 : VOP1_Real_vi <0x28>;
defm V_SIN_F32 : VOP1_Real_vi <0x29>;
defm V_COS_F32 : VOP1_Real_vi <0x2a>;
defm V_NOT_B32 : VOP1_Real_vi <0x2b>;
defm V_BFREV_B32 : VOP1_Real_vi <0x2c>;
defm V_FFBH_U32 : VOP1_Real_vi <0x2d>;
defm V_FFBL_B32 : VOP1_Real_vi <0x2e>;
defm V_FFBH_I32 : VOP1_Real_vi <0x2f>;
defm V_FREXP_EXP_I32_F64 : VOP1_Real_vi <0x30>;
defm V_FREXP_MANT_F64 : VOP1_Real_vi <0x31>;
defm V_FRACT_F64 : VOP1_Real_vi <0x32>;
defm V_FREXP_EXP_I32_F32 : VOP1_Real_vi <0x33>;
defm V_FREXP_MANT_F32 : VOP1_Real_vi <0x34>;
defm V_CLREXCP : VOP1_Real_vi <0x35>;
defm V_MOVRELD_B32 : VOP1_Real_vi <0x36>;
defm V_MOVRELS_B32 : VOP1_Real_vi <0x37>;
defm V_MOVRELSD_B32 : VOP1_Real_vi <0x38>;
defm V_TRUNC_F64 : VOP1_Real_vi <0x17>;
defm V_CEIL_F64 : VOP1_Real_vi <0x18>;
defm V_FLOOR_F64 : VOP1_Real_vi <0x1A>;
defm V_RNDNE_F64 : VOP1_Real_vi <0x19>;
defm V_LOG_LEGACY_F32 : VOP1_Real_vi <0x4c>;
defm V_EXP_LEGACY_F32 : VOP1_Real_vi <0x4b>;
defm V_CVT_F16_U16 : VOP1_Real_vi <0x39>;
defm V_CVT_F16_I16 : VOP1_Real_vi <0x3a>;
defm V_CVT_U16_F16 : VOP1_Real_vi <0x3b>;
defm V_CVT_I16_F16 : VOP1_Real_vi <0x3c>;
defm V_RCP_F16 : VOP1_Real_vi <0x3d>;
defm V_SQRT_F16 : VOP1_Real_vi <0x3e>;
defm V_RSQ_F16 : VOP1_Real_vi <0x3f>;
defm V_LOG_F16 : VOP1_Real_vi <0x40>;
defm V_EXP_F16 : VOP1_Real_vi <0x41>;
defm V_FREXP_MANT_F16 : VOP1_Real_vi <0x42>;
defm V_FREXP_EXP_I16_F16 : VOP1_Real_vi <0x43>;
defm V_FLOOR_F16 : VOP1_Real_vi <0x44>;
defm V_CEIL_F16 : VOP1_Real_vi <0x45>;
defm V_TRUNC_F16 : VOP1_Real_vi <0x46>;
defm V_RNDNE_F16 : VOP1_Real_vi <0x47>;
defm V_FRACT_F16 : VOP1_Real_vi <0x48>;
defm V_SIN_F16 : VOP1_Real_vi <0x49>;
defm V_COS_F16 : VOP1_Real_vi <0x4a>;
| 529 | |
let Predicates = [isVI] in {

// Select the llvm.amdgcn.mov.dpp intrinsic to the DPP form of v_mov_b32,
// converting the immediate control operands to machine immediates.
def : Pat <
  (int_amdgcn_mov_dpp i32:$src, imm:$dpp_ctrl, imm:$row_mask, imm:$bank_mask,
                      imm:$bound_ctrl),
  (V_MOV_B32_dpp $src, (as_i32imm $dpp_ctrl), (as_i32imm $row_mask),
                       (as_i32imm $bank_mask), (as_i1imm $bound_ctrl))
>;

} // End Predicates = [isVI]