//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %vreg3 to
/// <sgpr>, so we end up with final code like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains their definition class to
/// <vgpr> if a user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr>, then the coalescer
/// will be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(false));

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}

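// Descriptive comment added here: return true if any operand of \p MI is a
// virtual register whose register class contains VGPRs.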
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}

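// Descriptive comment added here: return the source and destination register
// classes of \p Copy, looking up physical register classes through \p TRI
// when an operand is not a virtual register.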
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC =
    TargetRegisterInfo::isVirtualRegister(SrcReg) ?
    MRI.getRegClass(SrcReg) :
    TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC =
    TargetRegisterInfo::isVirtualRegister(DstReg) ?
    MRI.getRegClass(DstReg) :
    TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}

static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(DstRC) && TRI.hasVGPRs(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(SrcRC) && TRI.hasVGPRs(DstRC);
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
//  SGPRx = ...
//  SGPRy = REG_SEQUENCE SGPRx, sub0 ...
//  VGPRz = COPY SGPRy
//
// ==>
//
//  VGPRx = COPY SGPRx
//  VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  unsigned DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy

  // =>
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    unsigned SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}

static bool phiHasVGPROperands(const MachineInstr &PHI,
                               const MachineRegisterInfo &MRI,
                               const SIRegisterInfo *TRI,
                               const SIInstrInfo *TII) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
      return true;
  }
  return false;
}
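
// Descriptive comment added here: return true if any incoming value of \p PHI
// is defined, possibly through a chain of other PHIs, by one of the SI_BREAK,
// SI_IF_BREAK or SI_ELSE_BREAK pseudo instructions.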
static bool phiHasBreakDef(const MachineInstr &PHI,
                           const MachineRegisterInfo &MRI,
                           SmallSet<unsigned, 8> &Visited) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (Visited.count(Reg))
      continue;

    Visited.insert(Reg);

    MachineInstr *DefInstr = MRI.getVRegDef(Reg);
    switch (DefInstr->getOpcode()) {
    default:
      break;
    case AMDGPU::SI_BREAK:
    case AMDGPU::SI_IF_BREAK:
    case AMDGPU::SI_ELSE_BREAK:
      return true;
    case AMDGPU::PHI:
      if (phiHasBreakDef(*DefInstr, MRI, Visited))
        return true;
    }
  }
  return false;
}

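// Descriptive comment added here: return true if any terminator of \p MBB
// modifies the EXEC mask, which indicates that the block may end with a
// divergent branch.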
static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}

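// Descriptive comment added here: check whether the SGPR copy \p Copy of the
// move-immediate \p MoveImm can be rewritten as a scalar move of the same
// immediate. On success, the scalar move opcode is returned in \p SMovOp and
// the immediate in \p Imm. Illustrative transform (register names are made
// up):
//
//   %vgpr = V_MOV_B32_e32 42
//   %sgpr = COPY %vgpr
// ==>
//   %sgpr = S_MOV_B32 42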
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}

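// Descriptive comment added here: walk the predecessors of \p MBB
// transitively, stopping at \p CutOff, and return true if \p Predicate holds
// for any visited block.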
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock*> Visited;
  SmallVector<MachineBasicBlock*, 4> Worklist(MBB->pred_begin(),
                                              MBB->pred_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}

static bool predsHasDivergentTerminator(MachineBasicBlock *MBB,
                                        const TargetRegisterInfo *TRI) {
  return searchPredecessors(MBB, nullptr, [TRI](MachineBasicBlock *MBB) {
           return hasTerminatorThatModifiesExec(*MBB, *TRI); });
}

// Checks if there is a potential path from the instruction \p From to the
// instruction \p To. If \p CutOff is specified and lies on that path, the
// portion of the path above \p CutOff is ignored and the instructions are
// reported as not reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  // If either From block dominates To block or instructions are in the same
  // block and From is higher.
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();
  if (MBBFrom == MBBTo)
    return false;

  // Instructions are in different blocks, do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
           (const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
}

// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
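//
// For example (illustrative only): if two blocks that share a common
// dominator both contain "%M0 = S_MOV_B32 -1", the two definitions can be
// replaced by a single one placed at the first non-PHI point of that common
// dominator.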
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   MachineDominatorTree &MDT) {
  // List of inits by immediate value.
  typedef std::map<unsigned, std::list<MachineInstr*>> InitListMap;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr*, 8> Clobbers;
  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO: MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check any possible interference
        auto intereferes = [&](MachineBasicBlock::iterator From,
                               MachineBasicBlock::iterator To) -> bool {
          assert(MDT.dominates(&*To, &*From));

          auto interferes = [&MDT, From, To](MachineInstr* &Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can be clobbered. This is not an interference only if both
            // are dominated by Clobber and belong to the same block, or if
            // Clobber properly dominates To; given that To >> From, Clobber
            // then dominates both and is located in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (any_of(Clobbers, interferes)) ||
                 (any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first && any_of(C.second, interferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!intereferes(MI2, MI1)) {
            DEBUG(dbgs() << "Erasing from BB#" << MI2->getParent()->getNumber()
                         << " " << *MI2);
            MI2->eraseFromParent();
            Defs.erase(I2++);
            Changed = true;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!intereferes(MI1, MI2)) {
            DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber()
                         << " " << *MI1);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        } else {
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
          if (!intereferes(MI1, I) && !intereferes(MI2, I)) {
            DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber()
                         << " " << *MI1 << " and moving from BB#"
                         << MI2->getParent()->getNumber() << " to BB#"
                         << I->getParent()->getNumber() << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}

bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  SmallVector<MachineInstr *, 16> Worklist;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY: {
        // If the destination register is a physical register there isn't
        // really much we can do to fix this.
        if (!TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()))
          continue;

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          unsigned SrcReg = MI.getOperand(1).getReg();
          if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
            TII->moveToVALU(MI);
            break;
          }

          MachineInstr *DefMI = MRI.getVRegDef(SrcReg);
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI);
        }

        break;
      }
      case AMDGPU::PHI: {
        unsigned Reg = MI.getOperand(0).getReg();
        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // We don't need to fix the PHI if the common dominator of the
        // two incoming blocks terminates with a uniform branch.
        if (MI.getNumExplicitOperands() == 5) {
          MachineBasicBlock *MBB0 = MI.getOperand(2).getMBB();
          MachineBasicBlock *MBB1 = MI.getOperand(4).getMBB();

          if (!predsHasDivergentTerminator(MBB0, TRI) &&
              !predsHasDivergentTerminator(MBB1, TRI)) {
            DEBUG(dbgs() << "Not fixing PHI for uniform branch: " << MI << '\n');
            break;
          }
        }

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For Example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2;
        //
        // Will Become:
        //
        // sgpr2 = def;
        // ...
        // sgpr2 = def;
        // ...
        // use sgpr2
        //
        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction. In this case, we know the program will
        // never enter the second block (the loop) without entering
        // the first block (where the condition is computed), so there
        // is no chance for values to be over-written.

        SmallSet<unsigned, 8> Visited;
        if (phiHasVGPROperands(MI, MRI, TRI, TII) ||
            !phiHasBreakDef(MI, MRI, Visited)) {
          DEBUG(dbgs() << "Fixing PHI: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI);
        break;
      }
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, MRI, *MDT);

  return true;
}