//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
      ,AMDGPUASI(STI.getAMDGPUAS())
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

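// Re-issue a generic copy as a target COPY, constraining each virtual
// register operand to the register class implied by its register bank.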
bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));
  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

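// Return a 32-bit operand for the requested half (sub0 or sub1) of a 64-bit
// register or immediate operand. For registers this emits a subregister COPY
// into a fresh SGPR; for immediates it splits the value into its low or high
// 32 bits.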
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
        .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

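// Select a 64-bit scalar add: add the low halves with S_ADD_U32, the high
// halves with S_ADDC_U32, and recombine the two results with a REG_SEQUENCE.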
bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (Size != 64)
    return false;

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}

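// A G_GEP at this point is plain pointer arithmetic, so reuse the 64-bit add
// selection.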
bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

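// Lower G_IMPLICIT_DEF to the target IMPLICIT_DEF, constraining the result
// register when a register class can be determined for it.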
bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);
  const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
  if (RC)
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

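// Select G_INTRINSIC. amdgcn_cvt_pkrtz is handled by the generated patterns;
// amdgcn_kernarg_segment_ptr becomes a copy from the preloaded kernarg
// segment pointer register. Other intrinsics are not selected here.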
bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(1).getIntrinsicID();

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::amdgcn_cvt_pkrtz:
    return selectImpl(I, CoverageInfo);

  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    MachineFunction *MF = I.getParent()->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    const ArgDescriptor *InputPtrReg;
    const TargetRegisterClass *RC;
    const DebugLoc &DL = I.getDebugLoc();

    std::tie(InputPtrReg, RC)
      = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
    if (!InputPtrReg)
      report_fatal_error("missing kernarg segment ptr");

    BuildMI(*I.getParent(), &I, DL, TII.get(AMDGPU::COPY))
        .add(I.getOperand(0))
        .addReg(MRI.getLiveInVirtReg(InputPtrReg->getRegister()));
    I.eraseFromParent();
    return true;
  }
  }
  return false;
}

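// Select a generic store as a FLAT store whose width matches the size of the
// stored value.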
bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Select store instruction based on address space
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(1))
      .add(I.getOperand(0))
      .addImm(0)  // offset
      .addImm(0)  // glc
      .addImm(0); // slc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

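// Materialize an integer or floating-point constant. CImm/FPImm operands are
// first converted to plain Imm operands, then moved into an SGPR or VGPR with
// S_MOV_B32 / V_MOV_B32; 64-bit values are split into two 32-bit moves joined
// by a REG_SEQUENCE.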
bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt &Imm = APInt(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
      .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
      .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
          .addReg(LoReg)
          .addImm(AMDGPU::sub0)
          .addReg(HiReg)
          .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target-independent opcodes.
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

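// Walk the G_GEP feeding this load, recording its constant offset and its
// SGPR/VGPR address components, and recurse through chained GEPs.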
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts? Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

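// Return true if this memory access is known to be uniform, and so is a
// candidate for scalar (SMRD) selection.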
static bool isInstrUniform(const MachineInstr &MI) {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

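// Map a 32-bit SMRD base opcode to the variant that loads the requested
// number of bits.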
static unsigned getSmrdOpcode(unsigned BaseOpcode, unsigned LoadSize) {

  if (LoadSize == 32)
    return BaseOpcode;

  switch (BaseOpcode) {
  case AMDGPU::S_LOAD_DWORD_IMM:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_IMM_ci:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM_ci;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM_ci;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM_ci;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM_ci;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_SGPR:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_SGPR;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_SGPR;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_SGPR;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_SGPR;
    }
    break;
  }
  llvm_unreachable("Invalid base smrd opcode or size");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

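// Try to select a uniform, constant-address load as a scalar memory (SMRD)
// load. Prefer the immediate-offset encodings when the offset is legal,
// otherwise fall back to an SGPR offset or a zero offset.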
bool AMDGPUInstructionSelector::selectSMRD(MachineInstr &I,
                                           ArrayRef<GEPInfo> AddrInfo) const {

  if (!I.hasOneMemOperand())
    return false;

  if ((*I.memoperands_begin())->getAddrSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
      (*I.memoperands_begin())->getAddrSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT)
    return false;

  if (!isInstrUniform(I))
    return false;

  if (hasVgprParts(AddrInfo))
    return false;

  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  unsigned Opcode;
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);

  if (!AddrInfo.empty() && AddrInfo[0].SgprParts.size() == 1) {

    const GEPInfo &GEPInfo = AddrInfo[0];

    unsigned PtrReg = GEPInfo.SgprParts[0];
    int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(Subtarget, GEPInfo.Imm);
    if (AMDGPU::isLegalSMRDImmOffset(Subtarget, GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
          .addReg(PtrReg)
          .addImm(EncodedImm)
          .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (Subtarget.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS &&
        isUInt<32>(EncodedImm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM_ci, LoadSize);
      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
          .addReg(PtrReg)
          .addImm(EncodedImm)
          .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (isUInt<32>(GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_SGPR, LoadSize);
      unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), OffsetReg)
          .addImm(GEPInfo.Imm);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
          .addReg(PtrReg)
          .addReg(OffsetReg)
          .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }
  }

  unsigned PtrReg = I.getOperand(1).getReg();
  Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);
  MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
      .addReg(PtrReg)
      .addImm(0)
      .addImm(0); // glc
  return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
}

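// Select a generic load: try a scalar SMRD load first, otherwise fall back to
// a FLAT load of the appropriate width.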
bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  if (selectSMRD(I, AddrInfo)) {
    I.eraseFromParent();
    return true;
  }

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported\n");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(0))
      .addReg(PtrReg)
      .addImm(0)  // offset
      .addImm(0)  // glc
      .addImm(0); // slc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

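// Main entry point: dispatch each generic instruction either to one of the
// hand-written selectors above or to the TableGen-generated selectImpl.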
bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    return selectG_LOAD(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  }
  return false;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

///
/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

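// The VOP3 modifier selectors below render the root operand unchanged and
// always emit zero source modifiers, clamp, and omod values.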
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // src_mods
  }};
}