//===-- VOP1Instructions.td - Vector Instruction Definitions -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VOP1 Classes
//===----------------------------------------------------------------------===//

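// VOP1e is the basic 32-bit VOP1 encoding: src0 occupies bits 8-0, the opcode
// bits 16-9, vdst bits 24-17, and bits 31-25 hold the fixed 0x3f VOP1
// encoding marker.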
class VOP1e <bits<8> op, VOPProfile P> : Enc32 {
  bits<8> vdst;
  bits<9> src0;

  let Inst{8-0}   = !if(P.HasSrc0, src0{8-0}, 0);
  let Inst{16-9}  = op;
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{31-25} = 0x3f; // encoding
}

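// SDWA form of the VOP1 encoding. The src0 field carries the fixed 0xf9 SDWA
// marker; the actual operand fields come from the VOP_SDWAe base class.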
class VOP1_SDWAe <bits<8> op, VOPProfile P> : VOP_SDWAe <P> {
  bits<8> vdst;

  let Inst{8-0}   = 0xf9; // sdwa
  let Inst{16-9}  = op;
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{31-25} = 0x3f; // encoding
}

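// Target-independent pseudo form of a VOP1 instruction. Selection patterns
// attach here; the encoding-specific "real" instructions further down are
// derived from these pseudos, one per encoding family.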
class VOP1_Pseudo <string opName, VOPProfile P, list<dag> pattern=[], bit VOP1Only = 0> :
  InstSI <P.Outs32, P.Ins32, "", pattern>,
  VOP <opName>,
  SIMCInstr <!if(VOP1Only, opName, opName#"_e32"), SIEncodingFamily.NONE>,
  MnemonicAlias<!if(VOP1Only, opName, opName#"_e32"), opName> {

  let isPseudo = 1;
  let isCodeGenOnly = 1;
  let UseNamedOperandTable = 1;

  string Mnemonic = opName;
  string AsmOperands = P.Asm32;

  let Size = 4;
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let SubtargetPredicate = isGCN;

  let VOP1 = 1;
  let VALU = 1;
  let Uses = [EXEC];

  let AsmVariantName = AMDGPUAsmVariants.Default;

  VOPProfile Pfl = P;
}

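// Encoding-specific ("real") counterpart of a VOP1 pseudo. It reuses the
// pseudo's operand lists and assembly string and copies over the flags that
// still matter after instruction selection.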
class VOP1_Real <VOP1_Pseudo ps, int EncodingFamily> :
  InstSI <ps.OutOperandList, ps.InOperandList, ps.Mnemonic # ps.AsmOperands, []>,
  SIMCInstr <ps.PseudoInstr, EncodingFamily> {

  let isPseudo = 0;
  let isCodeGenOnly = 0;

  // copy relevant pseudo op flags
  let SubtargetPredicate   = ps.SubtargetPredicate;
  let AsmMatchConverter    = ps.AsmMatchConverter;
  let AsmVariantName       = ps.AsmVariantName;
  let Constraints          = ps.Constraints;
  let DisableEncoding      = ps.DisableEncoding;
  let TSFlags              = ps.TSFlags;
  let UseNamedOperandTable = ps.UseNamedOperandTable;
  let Uses                 = ps.Uses;
}

class VOP1_SDWA_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[]> :
  VOP_SDWA_Pseudo <OpName, P, pattern> {
  let AsmMatchConverter = "cvtSdwaVOP1";
}

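// Builds the selection pattern used by the 64-bit (VOP3) form: when the
// profile has source modifiers, src0 is matched through VOP3Mods0 together
// with clamp and omod; otherwise the node is matched on the plain source.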
class getVOP1Pat64 <SDPatternOperator node, VOPProfile P> : LetDummies {
  list<dag> ret = !if(P.HasModifiers,
      [(set P.DstVT:$vdst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0,
                                  i32:$src0_modifiers, i1:$clamp, i32:$omod))))],
      [(set P.DstVT:$vdst, (node P.Src0VT:$src0))]);
}

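// Standard way to define a VOP1 instruction: one defm creates the 32-bit
// VOP1 pseudo, the 64-bit VOP3 pseudo (which carries the pattern), and the
// SDWA pseudo. For example,
//
//   defm V_NOT_B32 : VOP1Inst <"v_not_b32", VOP_I32_I32>;
//
// roughly yields V_NOT_B32_e32, V_NOT_B32_e64 and V_NOT_B32_sdwa pseudos.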
multiclass VOP1Inst <string opName, VOPProfile P,
                     SDPatternOperator node = null_frag> {
  def _e32 : VOP1_Pseudo <opName, P>;
  def _e64 : VOP3_Pseudo <opName, P, getVOP1Pat64<node, P>.ret>;
  def _sdwa : VOP1_SDWA_Pseudo <opName, P>;
}

//===----------------------------------------------------------------------===//
// VOP1 Instructions
//===----------------------------------------------------------------------===//

let VOPAsmPrefer32Bit = 1 in {
defm V_NOP : VOP1Inst <"v_nop", VOP_NONE>;
}

let isMoveImm = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in {
defm V_MOV_B32 : VOP1Inst <"v_mov_b32", VOP_I32_I32>;
} // End isMoveImm = 1

// FIXME: Specify SchedRW for READFIRSTLANE_B32.
// TODO: Make a profile for this; there is also a VOP3 encoding.
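// v_readfirstlane_b32 is written out by hand because its destination is an
// SGPR rather than a VGPR, which none of the VOP1 profiles above describe;
// the VOP1 encoding (opcode 0x2) is spelled out explicitly below.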
def V_READFIRSTLANE_B32 :
  InstSI <(outs SReg_32:$vdst),
    (ins VGPR_32:$src0),
    "v_readfirstlane_b32 $vdst, $src0",
    [(set i32:$vdst, (int_amdgcn_readfirstlane i32:$src0))]>,
  Enc32 {

  let isCodeGenOnly = 0;
  let UseNamedOperandTable = 1;

  let Size = 4;
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let SubtargetPredicate = isGCN;

  let VOP1 = 1;
  let VALU = 1;
  let Uses = [EXEC];
  let isConvergent = 1;

  bits<8> vdst;
  bits<9> src0;

  let Inst{8-0}   = src0;
  let Inst{16-9}  = 0x2;
  let Inst{24-17} = vdst;
  let Inst{31-25} = 0x3f; // encoding
}

let SchedRW = [WriteQuarterRate32] in {
defm V_CVT_I32_F64 : VOP1Inst <"v_cvt_i32_f64", VOP_I32_F64, fp_to_sint>;
defm V_CVT_F64_I32 : VOP1Inst <"v_cvt_f64_i32", VOP_F64_I32, sint_to_fp>;
defm V_CVT_F32_I32 : VOP1Inst <"v_cvt_f32_i32", VOP_F32_I32, sint_to_fp>;
defm V_CVT_F32_U32 : VOP1Inst <"v_cvt_f32_u32", VOP_F32_I32, uint_to_fp>;
defm V_CVT_U32_F32 : VOP1Inst <"v_cvt_u32_f32", VOP_I32_F32, fp_to_uint>;
defm V_CVT_I32_F32 : VOP1Inst <"v_cvt_i32_f32", VOP_I32_F32, fp_to_sint>;
defm V_CVT_F16_F32 : VOP1Inst <"v_cvt_f16_f32", VOP_F16_F32, fpround>;
defm V_CVT_F32_F16 : VOP1Inst <"v_cvt_f32_f16", VOP_F32_F16, fpextend>;
defm V_CVT_RPI_I32_F32 : VOP1Inst <"v_cvt_rpi_i32_f32", VOP_I32_F32, cvt_rpi_i32_f32>;
defm V_CVT_FLR_I32_F32 : VOP1Inst <"v_cvt_flr_i32_f32", VOP_I32_F32, cvt_flr_i32_f32>;
defm V_CVT_OFF_F32_I4 : VOP1Inst <"v_cvt_off_f32_i4", VOP_F32_I32>;
defm V_CVT_F32_F64 : VOP1Inst <"v_cvt_f32_f64", VOP_F32_F64, fpround>;
defm V_CVT_F64_F32 : VOP1Inst <"v_cvt_f64_f32", VOP_F64_F32, fpextend>;
defm V_CVT_F32_UBYTE0 : VOP1Inst <"v_cvt_f32_ubyte0", VOP_F32_I32, AMDGPUcvt_f32_ubyte0>;
defm V_CVT_F32_UBYTE1 : VOP1Inst <"v_cvt_f32_ubyte1", VOP_F32_I32, AMDGPUcvt_f32_ubyte1>;
defm V_CVT_F32_UBYTE2 : VOP1Inst <"v_cvt_f32_ubyte2", VOP_F32_I32, AMDGPUcvt_f32_ubyte2>;
defm V_CVT_F32_UBYTE3 : VOP1Inst <"v_cvt_f32_ubyte3", VOP_F32_I32, AMDGPUcvt_f32_ubyte3>;
defm V_CVT_U32_F64 : VOP1Inst <"v_cvt_u32_f64", VOP_I32_F64, fp_to_uint>;
defm V_CVT_F64_U32 : VOP1Inst <"v_cvt_f64_u32", VOP_F64_I32, uint_to_fp>;
} // End SchedRW = [WriteQuarterRate32]


defm V_FRACT_F32 : VOP1Inst <"v_fract_f32", VOP_F32_F32, AMDGPUfract>;
defm V_TRUNC_F32 : VOP1Inst <"v_trunc_f32", VOP_F32_F32, ftrunc>;
defm V_CEIL_F32 : VOP1Inst <"v_ceil_f32", VOP_F32_F32, fceil>;
defm V_RNDNE_F32 : VOP1Inst <"v_rndne_f32", VOP_F32_F32, frint>;
defm V_FLOOR_F32 : VOP1Inst <"v_floor_f32", VOP_F32_F32, ffloor>;
defm V_EXP_F32 : VOP1Inst <"v_exp_f32", VOP_F32_F32, fexp2>;

let SchedRW = [WriteQuarterRate32] in {
defm V_LOG_F32 : VOP1Inst <"v_log_f32", VOP_F32_F32, flog2>;
defm V_RCP_F32 : VOP1Inst <"v_rcp_f32", VOP_F32_F32, AMDGPUrcp>;
defm V_RCP_IFLAG_F32 : VOP1Inst <"v_rcp_iflag_f32", VOP_F32_F32>;
defm V_RSQ_F32 : VOP1Inst <"v_rsq_f32", VOP_F32_F32, AMDGPUrsq>;
} // End SchedRW = [WriteQuarterRate32]

let SchedRW = [WriteDouble] in {
defm V_RCP_F64 : VOP1Inst <"v_rcp_f64", VOP_F64_F64, AMDGPUrcp>;
defm V_RSQ_F64 : VOP1Inst <"v_rsq_f64", VOP_F64_F64, AMDGPUrsq>;
} // End SchedRW = [WriteDouble]

defm V_SQRT_F32 : VOP1Inst <"v_sqrt_f32", VOP_F32_F32, fsqrt>;

let SchedRW = [WriteDouble] in {
defm V_SQRT_F64 : VOP1Inst <"v_sqrt_f64", VOP_F64_F64, fsqrt>;
} // End SchedRW = [WriteDouble]

let SchedRW = [WriteQuarterRate32] in {
defm V_SIN_F32 : VOP1Inst <"v_sin_f32", VOP_F32_F32, AMDGPUsin>;
defm V_COS_F32 : VOP1Inst <"v_cos_f32", VOP_F32_F32, AMDGPUcos>;
} // End SchedRW = [WriteQuarterRate32]

defm V_NOT_B32 : VOP1Inst <"v_not_b32", VOP_I32_I32>;
defm V_BFREV_B32 : VOP1Inst <"v_bfrev_b32", VOP_I32_I32>;
defm V_FFBH_U32 : VOP1Inst <"v_ffbh_u32", VOP_I32_I32>;
defm V_FFBL_B32 : VOP1Inst <"v_ffbl_b32", VOP_I32_I32>;
defm V_FFBH_I32 : VOP1Inst <"v_ffbh_i32", VOP_I32_I32>;
defm V_FREXP_EXP_I32_F64 : VOP1Inst <"v_frexp_exp_i32_f64", VOP_I32_F64, int_amdgcn_frexp_exp>;

let SchedRW = [WriteDoubleAdd] in {
defm V_FREXP_MANT_F64 : VOP1Inst <"v_frexp_mant_f64", VOP_F64_F64, int_amdgcn_frexp_mant>;
defm V_FRACT_F64 : VOP1Inst <"v_fract_f64", VOP_F64_F64, AMDGPUfract>;
} // End SchedRW = [WriteDoubleAdd]

defm V_FREXP_EXP_I32_F32 : VOP1Inst <"v_frexp_exp_i32_f32", VOP_I32_F32, int_amdgcn_frexp_exp>;
defm V_FREXP_MANT_F32 : VOP1Inst <"v_frexp_mant_f32", VOP_F32_F32, int_amdgcn_frexp_mant>;

let VOPAsmPrefer32Bit = 1 in {
defm V_CLREXCP : VOP1Inst <"v_clrexcp", VOP_NO_EXT<VOP_NONE>>;
}

// Restrict src0 to be VGPR
def VOP_I32_VI32_NO_EXT : VOPProfile<[i32, i32, untyped, untyped]> {
  let Src0RC32 = VRegSrc_32;
  let Src0RC64 = VRegSrc_32;

  let HasExt = 0;
}

// Special case because there are no true output operands. Hack vdst
// to be a src operand. The custom inserter must add a tied implicit
// def and use of the super register since there seems to be no way to
// add an implicit def of a virtual register in tablegen.
def VOP_MOVRELD : VOPProfile<[untyped, i32, untyped, untyped]> {
  let Src0RC32 = VOPDstOperand<VGPR_32>;
  let Src0RC64 = VOPDstOperand<VGPR_32>;

  let Outs = (outs);
  let Ins32 = (ins Src0RC32:$vdst, VSrc_b32:$src0);
  let Ins64 = (ins Src0RC64:$vdst, VSrc_b32:$src0);
  let InsDPP = (ins Src0RC32:$vdst, Src0RC32:$src0, dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
                    bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
  let InsSDWA = (ins Src0RC32:$vdst, Src0ModSDWA:$src0_modifiers, VCSrc_b32:$src0,
                     clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
                     src0_sel:$src0_sel);

  let Asm32 = getAsm32<1, 1>.ret;
  let Asm64 = getAsm64<1, 1, 0, 1>.ret;
  let AsmDPP = getAsmDPP<1, 1, 0>.ret;
  let AsmSDWA = getAsmSDWA<1, 1, 0>.ret;

  let HasExt = 0;
  let HasDst = 0;
  let EmitDst = 1; // force vdst emission
}

let SubtargetPredicate = HasMovrel, Uses = [M0, EXEC] in {
// v_movreld_b32 is a special case because the destination output
// register is really a source. It isn't actually read (but may be
// written), and exists only to provide the base register to start
// indexing from. Tablegen seems to not let you define an implicit
// virtual register output for the super register being written into,
// so this must have an implicit def of the register added to it.
defm V_MOVRELD_B32 : VOP1Inst <"v_movreld_b32", VOP_MOVRELD>;
defm V_MOVRELS_B32 : VOP1Inst <"v_movrels_b32", VOP_I32_VI32_NO_EXT>;
defm V_MOVRELSD_B32 : VOP1Inst <"v_movrelsd_b32", VOP_NO_EXT<VOP_I32_I32>>;
} // End SubtargetPredicate = HasMovrel, Uses = [M0, EXEC]

// These instructions only exist on SI and CI.
let SubtargetPredicate = isSICI in {

let SchedRW = [WriteQuarterRate32] in {
defm V_MOV_FED_B32 : VOP1Inst <"v_mov_fed_b32", VOP_I32_I32>;
defm V_LOG_CLAMP_F32 : VOP1Inst <"v_log_clamp_f32", VOP_F32_F32, int_amdgcn_log_clamp>;
defm V_RCP_CLAMP_F32 : VOP1Inst <"v_rcp_clamp_f32", VOP_F32_F32>;
defm V_RCP_LEGACY_F32 : VOP1Inst <"v_rcp_legacy_f32", VOP_F32_F32, AMDGPUrcp_legacy>;
defm V_RSQ_CLAMP_F32 : VOP1Inst <"v_rsq_clamp_f32", VOP_F32_F32, AMDGPUrsq_clamp>;
defm V_RSQ_LEGACY_F32 : VOP1Inst <"v_rsq_legacy_f32", VOP_F32_F32, AMDGPUrsq_legacy>;
} // End SchedRW = [WriteQuarterRate32]

let SchedRW = [WriteDouble] in {
defm V_RCP_CLAMP_F64 : VOP1Inst <"v_rcp_clamp_f64", VOP_F64_F64>;
defm V_RSQ_CLAMP_F64 : VOP1Inst <"v_rsq_clamp_f64", VOP_F64_F64, AMDGPUrsq_clamp>;
} // End SchedRW = [WriteDouble]

} // End SubtargetPredicate = isSICI


let SubtargetPredicate = isCIVI in {

let SchedRW = [WriteDoubleAdd] in {
defm V_TRUNC_F64 : VOP1Inst <"v_trunc_f64", VOP_F64_F64, ftrunc>;
defm V_CEIL_F64 : VOP1Inst <"v_ceil_f64", VOP_F64_F64, fceil>;
defm V_FLOOR_F64 : VOP1Inst <"v_floor_f64", VOP_F64_F64, ffloor>;
defm V_RNDNE_F64 : VOP1Inst <"v_rndne_f64", VOP_F64_F64, frint>;
} // End SchedRW = [WriteDoubleAdd]

let SchedRW = [WriteQuarterRate32] in {
defm V_LOG_LEGACY_F32 : VOP1Inst <"v_log_legacy_f32", VOP_F32_F32>;
defm V_EXP_LEGACY_F32 : VOP1Inst <"v_exp_legacy_f32", VOP_F32_F32>;
} // End SchedRW = [WriteQuarterRate32]

} // End SubtargetPredicate = isCIVI

let SubtargetPredicate = isVI in {

defm V_CVT_F16_U16 : VOP1Inst <"v_cvt_f16_u16", VOP_F16_I16, uint_to_fp>;
defm V_CVT_F16_I16 : VOP1Inst <"v_cvt_f16_i16", VOP_F16_I16, sint_to_fp>;
defm V_CVT_U16_F16 : VOP1Inst <"v_cvt_u16_f16", VOP_I16_F16, fp_to_uint>;
defm V_CVT_I16_F16 : VOP1Inst <"v_cvt_i16_f16", VOP_I16_F16, fp_to_sint>;
defm V_RCP_F16 : VOP1Inst <"v_rcp_f16", VOP_F16_F16, AMDGPUrcp>;
defm V_SQRT_F16 : VOP1Inst <"v_sqrt_f16", VOP_F16_F16, fsqrt>;
defm V_RSQ_F16 : VOP1Inst <"v_rsq_f16", VOP_F16_F16, AMDGPUrsq>;
defm V_LOG_F16 : VOP1Inst <"v_log_f16", VOP_F16_F16, flog2>;
defm V_EXP_F16 : VOP1Inst <"v_exp_f16", VOP_F16_F16, fexp2>;
defm V_FREXP_MANT_F16 : VOP1Inst <"v_frexp_mant_f16", VOP_F16_F16, int_amdgcn_frexp_mant>;
defm V_FREXP_EXP_I16_F16 : VOP1Inst <"v_frexp_exp_i16_f16", VOP_I16_F16, int_amdgcn_frexp_exp>;
defm V_FLOOR_F16 : VOP1Inst <"v_floor_f16", VOP_F16_F16, ffloor>;
defm V_CEIL_F16 : VOP1Inst <"v_ceil_f16", VOP_F16_F16, fceil>;
defm V_TRUNC_F16 : VOP1Inst <"v_trunc_f16", VOP_F16_F16, ftrunc>;
defm V_RNDNE_F16 : VOP1Inst <"v_rndne_f16", VOP_F16_F16, frint>;
defm V_FRACT_F16 : VOP1Inst <"v_fract_f16", VOP_F16_F16, AMDGPUfract>;
defm V_SIN_F16 : VOP1Inst <"v_sin_f16", VOP_F16_F16, AMDGPUsin>;
defm V_COS_F16 : VOP1Inst <"v_cos_f16", VOP_F16_F16, AMDGPUcos>;

}

let Predicates = [isVI] in {

def : Pat<
  (f32 (f16_to_fp i16:$src)),
  (V_CVT_F32_F16_e32 $src)
>;

def : Pat<
  (i16 (AMDGPUfp_to_f16 f32:$src)),
  (V_CVT_F16_F32_e32 $src)
>;

}

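// Profile for v_swap_b32: two VGPR results and two VGPR sources, tied
// together below so that each destination aliases the opposite source.
// There is intentionally no 64-bit (VOP3) form.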
def VOP_SWAP_I32 : VOPProfile<[i32, i32, i32, untyped]> {
  let Outs32 = (outs VGPR_32:$vdst, VGPR_32:$vdst1);
  let Ins32 = (ins VGPR_32:$src0, VGPR_32:$src1);
  let Outs64 = Outs32;
  let Asm32 = " $vdst, $src0";
  let Asm64 = "";
  let Ins64 = (ins);
}

let SubtargetPredicate = isGFX9 in {
let Constraints = "$vdst = $src1, $vdst1 = $src0",
    DisableEncoding = "$vdst1,$src1",
    SchedRW = [Write64Bit, Write64Bit] in {
// Never VOP3. Takes as long as 2 v_mov_b32s.
def V_SWAP_B32 : VOP1_Pseudo <"v_swap_b32", VOP_SWAP_I32, [], 1>;
}

} // End SubtargetPredicate = isGFX9

//===----------------------------------------------------------------------===//
// Target
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SI
//===----------------------------------------------------------------------===//

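// Creates the SI real instructions for one VOP1 opcode: the 32-bit VOP1
// encoding plus the corresponding 64-bit VOP3 encoding ({1, 1, op} places
// VOP1 opcodes in the 0x180-0x1ff range of the VOP3 opcode field).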
multiclass VOP1_Real_si <bits<9> op> {
  let AssemblerPredicates = [isSICI], DecoderNamespace = "SICI" in {
    def _e32_si :
      VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
      VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
    def _e64_si :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3e_si <{1, 1, op{6-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
}

defm V_NOP : VOP1_Real_si <0x0>;
defm V_MOV_B32 : VOP1_Real_si <0x1>;
defm V_CVT_I32_F64 : VOP1_Real_si <0x3>;
defm V_CVT_F64_I32 : VOP1_Real_si <0x4>;
defm V_CVT_F32_I32 : VOP1_Real_si <0x5>;
defm V_CVT_F32_U32 : VOP1_Real_si <0x6>;
defm V_CVT_U32_F32 : VOP1_Real_si <0x7>;
defm V_CVT_I32_F32 : VOP1_Real_si <0x8>;
defm V_MOV_FED_B32 : VOP1_Real_si <0x9>;
defm V_CVT_F16_F32 : VOP1_Real_si <0xa>;
defm V_CVT_F32_F16 : VOP1_Real_si <0xb>;
defm V_CVT_RPI_I32_F32 : VOP1_Real_si <0xc>;
defm V_CVT_FLR_I32_F32 : VOP1_Real_si <0xd>;
defm V_CVT_OFF_F32_I4 : VOP1_Real_si <0xe>;
defm V_CVT_F32_F64 : VOP1_Real_si <0xf>;
defm V_CVT_F64_F32 : VOP1_Real_si <0x10>;
defm V_CVT_F32_UBYTE0 : VOP1_Real_si <0x11>;
defm V_CVT_F32_UBYTE1 : VOP1_Real_si <0x12>;
defm V_CVT_F32_UBYTE2 : VOP1_Real_si <0x13>;
defm V_CVT_F32_UBYTE3 : VOP1_Real_si <0x14>;
defm V_CVT_U32_F64 : VOP1_Real_si <0x15>;
defm V_CVT_F64_U32 : VOP1_Real_si <0x16>;
defm V_FRACT_F32 : VOP1_Real_si <0x20>;
defm V_TRUNC_F32 : VOP1_Real_si <0x21>;
defm V_CEIL_F32 : VOP1_Real_si <0x22>;
defm V_RNDNE_F32 : VOP1_Real_si <0x23>;
defm V_FLOOR_F32 : VOP1_Real_si <0x24>;
defm V_EXP_F32 : VOP1_Real_si <0x25>;
defm V_LOG_CLAMP_F32 : VOP1_Real_si <0x26>;
defm V_LOG_F32 : VOP1_Real_si <0x27>;
defm V_RCP_CLAMP_F32 : VOP1_Real_si <0x28>;
defm V_RCP_LEGACY_F32 : VOP1_Real_si <0x29>;
defm V_RCP_F32 : VOP1_Real_si <0x2a>;
defm V_RCP_IFLAG_F32 : VOP1_Real_si <0x2b>;
defm V_RSQ_CLAMP_F32 : VOP1_Real_si <0x2c>;
defm V_RSQ_LEGACY_F32 : VOP1_Real_si <0x2d>;
defm V_RSQ_F32 : VOP1_Real_si <0x2e>;
defm V_RCP_F64 : VOP1_Real_si <0x2f>;
defm V_RCP_CLAMP_F64 : VOP1_Real_si <0x30>;
defm V_RSQ_F64 : VOP1_Real_si <0x31>;
defm V_RSQ_CLAMP_F64 : VOP1_Real_si <0x32>;
defm V_SQRT_F32 : VOP1_Real_si <0x33>;
defm V_SQRT_F64 : VOP1_Real_si <0x34>;
defm V_SIN_F32 : VOP1_Real_si <0x35>;
defm V_COS_F32 : VOP1_Real_si <0x36>;
defm V_NOT_B32 : VOP1_Real_si <0x37>;
defm V_BFREV_B32 : VOP1_Real_si <0x38>;
defm V_FFBH_U32 : VOP1_Real_si <0x39>;
defm V_FFBL_B32 : VOP1_Real_si <0x3a>;
defm V_FFBH_I32 : VOP1_Real_si <0x3b>;
defm V_FREXP_EXP_I32_F64 : VOP1_Real_si <0x3c>;
defm V_FREXP_MANT_F64 : VOP1_Real_si <0x3d>;
defm V_FRACT_F64 : VOP1_Real_si <0x3e>;
defm V_FREXP_EXP_I32_F32 : VOP1_Real_si <0x3f>;
defm V_FREXP_MANT_F32 : VOP1_Real_si <0x40>;
defm V_CLREXCP : VOP1_Real_si <0x41>;
defm V_MOVRELD_B32 : VOP1_Real_si <0x42>;
defm V_MOVRELS_B32 : VOP1_Real_si <0x43>;
defm V_MOVRELSD_B32 : VOP1_Real_si <0x44>;

//===----------------------------------------------------------------------===//
// CI
//===----------------------------------------------------------------------===//

multiclass VOP1_Real_ci <bits<9> op> {
  let AssemblerPredicates = [isCIOnly], DecoderNamespace = "CI" in {
    def _e32_ci :
      VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
      VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
    def _e64_ci :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
      VOP3e_si <{1, 1, op{6-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }
}

defm V_TRUNC_F64 : VOP1_Real_ci <0x17>;
defm V_CEIL_F64 : VOP1_Real_ci <0x18>;
defm V_FLOOR_F64 : VOP1_Real_ci <0x1A>;
defm V_RNDNE_F64 : VOP1_Real_ci <0x19>;
defm V_LOG_LEGACY_F32 : VOP1_Real_ci <0x45>;
defm V_EXP_LEGACY_F32 : VOP1_Real_ci <0x46>;

//===----------------------------------------------------------------------===//
// VI
//===----------------------------------------------------------------------===//

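// DPP form of the VOP1 encoding. The src0 field carries the fixed 0xfa DPP
// marker; the DPP control operands come from the VOP_DPP base class.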
class VOP1_DPP <bits<8> op, VOP1_Pseudo ps, VOPProfile P = ps.Pfl> :
  VOP_DPP <ps.OpName, P> {
  let Defs = ps.Defs;
  let Uses = ps.Uses;
  let SchedRW = ps.SchedRW;
  let hasSideEffects = ps.hasSideEffects;
  let Constraints = ps.Constraints;
  let DisableEncoding = ps.DisableEncoding;

  bits<8> vdst;
  let Inst{8-0}   = 0xfa; // dpp
  let Inst{16-9}  = op;
  let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
  let Inst{31-25} = 0x3f; // encoding
}

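// For instructions such as v_swap_b32 that exist only in the 32-bit VOP1
// encoding: the pseudo has no _e32 suffix and there is no VOP3 counterpart.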
multiclass VOP1Only_Real_vi <bits<10> op> {
  let AssemblerPredicates = [isVI], DecoderNamespace = "VI" in {
    def _vi :
      VOP1_Real<!cast<VOP1_Pseudo>(NAME), SIEncodingFamily.VI>,
      VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME).Pfl>;
  }
}

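// Creates the VI real instructions for one VOP1 opcode: VOP1, VOP3 (VOP1
// opcodes are offset by 0x140 in the VI VOP3 opcode field), SDWA, and an
// assembler/disassembler-only DPP form.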
multiclass VOP1_Real_vi <bits<10> op> {
  let AssemblerPredicates = [isVI], DecoderNamespace = "VI" in {
    def _e32_vi :
      VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.VI>,
      VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
    def _e64_vi :
      VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
      VOP3e_vi <!add(0x140, op), !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
  }

  def _sdwa_vi :
    VOP_SDWA_Real <!cast<VOP1_SDWA_Pseudo>(NAME#"_sdwa")>,
    VOP1_SDWAe <op{7-0}, !cast<VOP1_SDWA_Pseudo>(NAME#"_sdwa").Pfl>;

  // For now, DPP is only defined for the assembler/disassembler.
  // TODO: add a corresponding pseudo.
  def _dpp : VOP1_DPP<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32")>;
}

defm V_NOP : VOP1_Real_vi <0x0>;
defm V_MOV_B32 : VOP1_Real_vi <0x1>;
defm V_CVT_I32_F64 : VOP1_Real_vi <0x3>;
defm V_CVT_F64_I32 : VOP1_Real_vi <0x4>;
defm V_CVT_F32_I32 : VOP1_Real_vi <0x5>;
defm V_CVT_F32_U32 : VOP1_Real_vi <0x6>;
defm V_CVT_U32_F32 : VOP1_Real_vi <0x7>;
defm V_CVT_I32_F32 : VOP1_Real_vi <0x8>;
defm V_CVT_F16_F32 : VOP1_Real_vi <0xa>;
defm V_CVT_F32_F16 : VOP1_Real_vi <0xb>;
defm V_CVT_RPI_I32_F32 : VOP1_Real_vi <0xc>;
defm V_CVT_FLR_I32_F32 : VOP1_Real_vi <0xd>;
defm V_CVT_OFF_F32_I4 : VOP1_Real_vi <0xe>;
defm V_CVT_F32_F64 : VOP1_Real_vi <0xf>;
defm V_CVT_F64_F32 : VOP1_Real_vi <0x10>;
defm V_CVT_F32_UBYTE0 : VOP1_Real_vi <0x11>;
defm V_CVT_F32_UBYTE1 : VOP1_Real_vi <0x12>;
defm V_CVT_F32_UBYTE2 : VOP1_Real_vi <0x13>;
defm V_CVT_F32_UBYTE3 : VOP1_Real_vi <0x14>;
defm V_CVT_U32_F64 : VOP1_Real_vi <0x15>;
defm V_CVT_F64_U32 : VOP1_Real_vi <0x16>;
defm V_FRACT_F32 : VOP1_Real_vi <0x1b>;
defm V_TRUNC_F32 : VOP1_Real_vi <0x1c>;
defm V_CEIL_F32 : VOP1_Real_vi <0x1d>;
defm V_RNDNE_F32 : VOP1_Real_vi <0x1e>;
defm V_FLOOR_F32 : VOP1_Real_vi <0x1f>;
defm V_EXP_F32 : VOP1_Real_vi <0x20>;
defm V_LOG_F32 : VOP1_Real_vi <0x21>;
defm V_RCP_F32 : VOP1_Real_vi <0x22>;
defm V_RCP_IFLAG_F32 : VOP1_Real_vi <0x23>;
defm V_RSQ_F32 : VOP1_Real_vi <0x24>;
defm V_RCP_F64 : VOP1_Real_vi <0x25>;
defm V_RSQ_F64 : VOP1_Real_vi <0x26>;
defm V_SQRT_F32 : VOP1_Real_vi <0x27>;
defm V_SQRT_F64 : VOP1_Real_vi <0x28>;
defm V_SIN_F32 : VOP1_Real_vi <0x29>;
defm V_COS_F32 : VOP1_Real_vi <0x2a>;
defm V_NOT_B32 : VOP1_Real_vi <0x2b>;
defm V_BFREV_B32 : VOP1_Real_vi <0x2c>;
defm V_FFBH_U32 : VOP1_Real_vi <0x2d>;
defm V_FFBL_B32 : VOP1_Real_vi <0x2e>;
defm V_FFBH_I32 : VOP1_Real_vi <0x2f>;
defm V_FREXP_EXP_I32_F64 : VOP1_Real_vi <0x30>;
defm V_FREXP_MANT_F64 : VOP1_Real_vi <0x31>;
defm V_FRACT_F64 : VOP1_Real_vi <0x32>;
defm V_FREXP_EXP_I32_F32 : VOP1_Real_vi <0x33>;
defm V_FREXP_MANT_F32 : VOP1_Real_vi <0x34>;
defm V_CLREXCP : VOP1_Real_vi <0x35>;
defm V_MOVRELD_B32 : VOP1_Real_vi <0x36>;
defm V_MOVRELS_B32 : VOP1_Real_vi <0x37>;
defm V_MOVRELSD_B32 : VOP1_Real_vi <0x38>;
defm V_TRUNC_F64 : VOP1_Real_vi <0x17>;
defm V_CEIL_F64 : VOP1_Real_vi <0x18>;
defm V_FLOOR_F64 : VOP1_Real_vi <0x1A>;
defm V_RNDNE_F64 : VOP1_Real_vi <0x19>;
defm V_LOG_LEGACY_F32 : VOP1_Real_vi <0x4c>;
defm V_EXP_LEGACY_F32 : VOP1_Real_vi <0x4b>;
defm V_CVT_F16_U16 : VOP1_Real_vi <0x39>;
defm V_CVT_F16_I16 : VOP1_Real_vi <0x3a>;
defm V_CVT_U16_F16 : VOP1_Real_vi <0x3b>;
defm V_CVT_I16_F16 : VOP1_Real_vi <0x3c>;
defm V_RCP_F16 : VOP1_Real_vi <0x3d>;
defm V_SQRT_F16 : VOP1_Real_vi <0x3e>;
defm V_RSQ_F16 : VOP1_Real_vi <0x3f>;
defm V_LOG_F16 : VOP1_Real_vi <0x40>;
defm V_EXP_F16 : VOP1_Real_vi <0x41>;
defm V_FREXP_MANT_F16 : VOP1_Real_vi <0x42>;
defm V_FREXP_EXP_I16_F16 : VOP1_Real_vi <0x43>;
defm V_FLOOR_F16 : VOP1_Real_vi <0x44>;
defm V_CEIL_F16 : VOP1_Real_vi <0x45>;
defm V_TRUNC_F16 : VOP1_Real_vi <0x46>;
defm V_RNDNE_F16 : VOP1_Real_vi <0x47>;
defm V_FRACT_F16 : VOP1_Real_vi <0x48>;
defm V_SIN_F16 : VOP1_Real_vi <0x49>;
defm V_COS_F16 : VOP1_Real_vi <0x4a>;
defm V_SWAP_B32 : VOP1Only_Real_vi <0x51>;

// Copy of v_mov_b32 with $vdst as a use operand for use with VGPR
// indexing mode. vdst can't be treated as a def for codegen purposes,
// and an implicit use and def of the super register should be added.
def V_MOV_B32_indirect : VPseudoInstSI<(outs),
  (ins getVALUDstForVT<i32>.ret:$vdst, getVOPSrc0ForVT<i32>.ret:$src0)>,
  PseudoInstExpansion<(V_MOV_B32_e32_vi getVALUDstForVT<i32>.ret:$vdst,
                       getVOPSrc0ForVT<i32>.ret:$src0)> {
  let VOP1 = 1;
  let SubtargetPredicate = isVI;
}

// This is a pseudo variant of the v_movreld_b32 instruction in which the
// vector operand appears only twice, once as def and once as use. Using this
// pseudo avoids problems with the Two Address instructions pass.
class V_MOVRELD_B32_pseudo<RegisterClass rc> : VPseudoInstSI <
  (outs rc:$vdst),
  (ins rc:$vsrc, VSrc_b32:$val, i32imm:$offset)> {
  let VOP1 = 1;

  let Constraints = "$vsrc = $vdst";
  let Uses = [M0, EXEC];

  let SubtargetPredicate = HasMovrel;
}

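// One variant per indexed register width: V1 indexes into a single VGPR, V2
// into a 64-bit pair, and so on up to V16 for a 512-bit (16 VGPR) tuple.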
def V_MOVRELD_B32_V1 : V_MOVRELD_B32_pseudo<VGPR_32>;
def V_MOVRELD_B32_V2 : V_MOVRELD_B32_pseudo<VReg_64>;
def V_MOVRELD_B32_V4 : V_MOVRELD_B32_pseudo<VReg_128>;
def V_MOVRELD_B32_V8 : V_MOVRELD_B32_pseudo<VReg_256>;
def V_MOVRELD_B32_V16 : V_MOVRELD_B32_pseudo<VReg_512>;

let Predicates = [isVI] in {

def : Pat <
  (i32 (int_amdgcn_mov_dpp i32:$src, imm:$dpp_ctrl, imm:$row_mask, imm:$bank_mask,
                           imm:$bound_ctrl)),
  (V_MOV_B32_dpp $src, (as_i32imm $dpp_ctrl), (as_i32imm $row_mask),
                       (as_i32imm $bank_mask), (as_i1imm $bound_ctrl))
>;

def : Pat<
  (i32 (anyext i16:$src)),
  (COPY $src)
>;

def : Pat<
  (i64 (anyext i16:$src)),
  (REG_SEQUENCE VReg_64,
    (i32 (COPY $src)), sub0,
    (V_MOV_B32_e32 (i32 0)), sub1)
>;

def : Pat<
  (i16 (trunc i32:$src)),
  (COPY $src)
>;

def : Pat <
  (i16 (trunc i64:$src)),
  (EXTRACT_SUBREG $src, sub0)
>;

} // End Predicates = [isVI]