//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass removes redundant S_OR_B64 instructions enabling lanes in
/// the exec mask. If two SI_END_CF (lowered as S_OR_B64) come together without
/// any vector instructions between them, we can keep only the outer SI_END_CF:
/// the CFG is structured, so the lanes enabled by the outer end statement are
/// always a superset of the lanes enabled by the inner one.
///
/// This needs to be done before register allocation to eliminate the registers
/// holding the saved exec bits, but after the register coalescer so that no
/// vector register copies remain between the different end cf statements.
///
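/// A typical pattern this pass rewrites (illustrative pseudo-MIR; the block
/// and register names are for exposition only, not exact MIR syntax):
///
///   bb.inner:
///     %exec = S_OR_B64 %exec, %saved_inner   ; inner end cf, removed
///     ...                                    ; SALU only, exec not read
///   bb.outer:
///     %exec = S_OR_B64 %exec, %saved_outer   ; outer end cf, kept
///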
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"

namespace {

class SIOptimizeExecMaskingPreRA : public MachineFunctionPass {
public:
  static char ID;

public:
  SIOptimizeExecMaskingPreRA() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPreRAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations pre-RA";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                      "SI optimize exec mask operations pre-RA", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                    "SI optimize exec mask operations pre-RA", false, false)

char SIOptimizeExecMaskingPreRA::ID = 0;

char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRA::ID;

FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
  return new SIOptimizeExecMaskingPreRA();
}

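// An SI_END_CF is lowered to an S_OR_B64 that writes exec, re-enabling the
// lanes that were masked off for the conditional region; match that form.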
static bool isEndCF(const MachineInstr& MI, const SIRegisterInfo* TRI) {
  return MI.getOpcode() == AMDGPU::S_OR_B64 &&
         MI.modifiesRegister(AMDGPU::EXEC, TRI);
}

static bool isFullExecCopy(const MachineInstr& MI) {
  return MI.isFullCopy() && MI.getOperand(1).getReg() == AMDGPU::EXEC;
}

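// Return the source register of the exec-defining S_OR_B64 that is not exec
// itself, i.e. the register holding the saved exec bits, or NoRegister.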
static unsigned getOrNonExecReg(const MachineInstr &MI,
                                const SIInstrInfo &TII) {
  auto Op = TII.getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Op->isReg() && Op->getReg() != AMDGPU::EXEC)
    return Op->getReg();
  Op = TII.getNamedOperand(MI, AMDGPU::OpName::src0);
  if (Op->isReg() && Op->getReg() != AMDGPU::EXEC)
    return Op->getReg();
  return AMDGPU::NoRegister;
}

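// Return the unique instruction that defines the saved exec register used by
// MI, provided that instruction is a full copy of exec; otherwise nullptr.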
static MachineInstr* getOrExecSource(const MachineInstr &MI,
                                     const SIInstrInfo &TII,
                                     const MachineRegisterInfo &MRI) {
  auto SavedExec = getOrNonExecReg(MI, TII);
  if (SavedExec == AMDGPU::NoRegister)
    return nullptr;
  auto SaveExecInst = MRI.getUniqueVRegDef(SavedExec);
  if (!SaveExecInst || !isFullExecCopy(*SaveExecInst))
    return nullptr;
  return SaveExecInst;
}

bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
  bool Changed = false;

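  // Look for a block that begins with an end cf and afterwards contains only
  // SALU instructions that do not read exec. If its single layout successor
  // also begins with an end cf, the leading end cf here is redundant.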
  for (MachineBasicBlock &MBB : MF) {
    auto Lead = MBB.begin(), E = MBB.end();
    if (MBB.succ_size() != 1 || Lead == E || !isEndCF(*Lead, TRI))
      continue;

    const MachineBasicBlock* Succ = *MBB.succ_begin();
    if (!MBB.isLayoutSuccessor(Succ))
      continue;

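    // Only SALU instructions that do not read the exec mask may follow the
    // leading end cf in this block.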
    auto I = std::next(Lead);

    for ( ; I != E; ++I)
      if (!TII->isSALU(*I) || I->readsRegister(AMDGPU::EXEC, TRI))
        break;

    if (I != E)
      continue;

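    // The successor must also begin with an end cf whose saved exec operand
    // comes from a full copy of exec.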
    const auto NextLead = Succ->begin();
    if (NextLead == Succ->end() || !isEndCF(*NextLead, TRI) ||
        !getOrExecSource(*NextLead, *TII, MRI))
      continue;

    DEBUG(dbgs() << "Redundant EXEC = S_OR_B64 found: " << *Lead << '\n');

    auto SaveExec = getOrExecSource(*Lead, *TII, MRI);
    unsigned SaveExecReg = getOrNonExecReg(*Lead, *TII);
    LIS->RemoveMachineInstrFromMaps(*Lead);
    Lead->eraseFromParent();
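    // The register that held the saved exec bits just lost a use, so
    // recompute its live interval.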
    if (SaveExecReg) {
      LIS->removeInterval(SaveExecReg);
      LIS->createAndComputeVirtRegInterval(SaveExecReg);
    }

    Changed = true;

    // If the only remaining use of the exec copy that fed the removed
    // instruction is an S_AND_B64, fold the copy now.
    if (!SaveExec || !SaveExec->isFullCopy())
      continue;

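    // The copy can be folded only if every remaining use of the saved exec
    // register lives in the same block as the copy itself.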
    unsigned SavedExec = SaveExec->getOperand(0).getReg();
    bool SafeToReplace = true;
    for (auto& U : MRI.use_nodbg_instructions(SavedExec)) {
      if (U.getParent() != SaveExec->getParent()) {
        SafeToReplace = false;
        break;
      }

      DEBUG(dbgs() << "Redundant EXEC COPY: " << *SaveExec << '\n');
    }

    if (SafeToReplace) {
      LIS->RemoveMachineInstrFromMaps(*SaveExec);
      SaveExec->eraseFromParent();
      MRI.replaceRegWith(SavedExec, AMDGPU::EXEC);
      LIS->removeInterval(SavedExec);
    }
  }

  if (Changed) {
    // Recompute liveness for both reg units of exec.
    LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::EXEC_LO, TRI));
    LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::EXEC_HI, TRI));
  }

  return Changed;
}