//===--------- MipsOptimizePICCall.cpp - Optimize PIC Calls ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates unnecessary instructions that set up $gp and replaces
// instructions that load target function addresses with copy instructions.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "optimize-mips-pic-call"

#include "Mips.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/RecyclingAllocator.h"

using namespace llvm;

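// With -mips-load-target-from-got (the default), call targets are always
// reloaded from the GOT. Clearing the flag lets visitNode rewrite a repeated
// call to reuse the address that a dominating call has already loaded.
// -mips-erase-gp-opnd controls whether the redundant $gp operand is removed
// from such repeated calls.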
static cl::opt<bool> LoadTargetFromGOT("mips-load-target-from-got",
                                       cl::init(true),
                                       cl::desc("Load target address from GOT"),
                                       cl::Hidden);

static cl::opt<bool> EraseGPOpnd("mips-erase-gp-opnd",
                                 cl::init(true), cl::desc("Erase GP Operand"),
                                 cl::Hidden);

namespace {
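// For each underlying GOT object, record how many dominating loads of its
// address have been seen and the virtual register defined by the most recent
// one. The hash-table scopes mirror the dominator tree, so entries disappear
// once the walk leaves the subtree that created them.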
typedef std::pair<unsigned, unsigned> CntRegP;
typedef RecyclingAllocator<BumpPtrAllocator,
                           ScopedHashTableVal<const Value *, CntRegP> >
AllocatorTy;
typedef ScopedHashTable<const Value *, CntRegP, DenseMapInfo<const Value *>,
                        AllocatorTy> ScopedHTType;

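// Bookkeeping for one dominator-tree node: remembers the node and the
// hash-table scope opened when the block is first taken off the work list, so
// that postVisit() can close the scope (discarding its entries) when the node
// is seen again.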
class MBBInfo {
public:
  MBBInfo(MachineDomTreeNode *N);
  const MachineDomTreeNode *getNode() const;
  bool isVisited() const;
  void preVisit(ScopedHTType &ScopedHT);
  void postVisit();

private:
  MachineDomTreeNode *Node;
  ScopedHTType::ScopeTy *HTScope;
};

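// The pass itself: walks the dominator tree and rewrites eligible PIC call
// sequences as described in the file header.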
class OptimizePICCall : public MachineFunctionPass {
public:
  OptimizePICCall(TargetMachine &tm) : MachineFunctionPass(ID) {}

  virtual const char *getPassName() const { return "Mips OptimizePICCall"; }

  bool runOnMachineFunction(MachineFunction &F);

  void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<MachineDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

private:
  /// \brief Visit MBB.
  bool visitNode(MBBInfo &MBBI);

  /// \brief Test if MI jumps to a function via a register.
  ///
  /// Also, return the virtual register containing the target function's
  /// address and the underlying object in Reg and Val respectively, if the
  /// function's address can be resolved lazily.
  bool isCallViaRegister(MachineInstr &MI, unsigned &Reg,
                         const Value *&Val) const;

  /// \brief Return the number of instructions that dominate the current
  /// instruction and load the function address from object Entry.
  unsigned getCount(const Value *Entry);

  /// \brief Return the destination virtual register of the last instruction
  /// that loads from object Entry.
  unsigned getReg(const Value *Entry);

  /// \brief Update ScopedHT.
  void incCntAndSetReg(const Value *Entry, unsigned Reg);

  ScopedHTType ScopedHT;
  static char ID;
};

char OptimizePICCall::ID = 0;
} // end of anonymous namespace

/// Return MI's first operand if it is a use of a virtual register; otherwise
/// return null.
static MachineOperand *getCallTargetRegOpnd(MachineInstr &MI) {
  if (MI.getNumOperands() == 0)
    return 0;

  MachineOperand &MO = MI.getOperand(0);

  if (!MO.isReg() || !MO.isUse() ||
      !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    return 0;

  return &MO;
}

/// Return the value type of register Reg.
static MVT::SimpleValueType getRegTy(unsigned Reg, MachineFunction &MF) {
  const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
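  // A call-target vreg belongs to an integer register class that carries
  // exactly one simple value type (i32 or i64), which the assert below and
  // the callers of this function rely on.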
  assert(RC->vt_end() - RC->vt_begin() == 1);
  return *RC->vt_begin();
}

/// Do the following transformation:
///
/// jalr $vreg
/// =>
/// copy $t9, $vreg
/// jalr $t9
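///
/// In the MIPS PIC calling convention the callee expects its own entry address
/// in $t9 and uses it to recompute $gp, so the call target is copied into $t9
/// (or $t9_64 for 64-bit values) before the call.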
static void setCallTargetReg(MachineBasicBlock *MBB,
                             MachineBasicBlock::iterator I) {
  MachineFunction &MF = *MBB->getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  unsigned SrcReg = I->getOperand(0).getReg();
  unsigned DstReg = getRegTy(SrcReg, MF) == MVT::i32 ? Mips::T9 : Mips::T9_64;
  BuildMI(*MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), DstReg)
      .addReg(SrcReg);
  I->getOperand(0).setReg(DstReg);
}

/// Search MI's operands for register GP and erase it.
static void eraseGPOpnd(MachineInstr &MI) {
  if (!EraseGPOpnd)
    return;

  MachineFunction &MF = *MI.getParent()->getParent();
  MVT::SimpleValueType Ty = getRegTy(MI.getOperand(0).getReg(), MF);
  unsigned Reg = Ty == MVT::i32 ? Mips::GP : Mips::GP_64;

  for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
    MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg() && MO.getReg() == Reg) {
      MI.RemoveOperand(I);
      return;
    }
  }

  llvm_unreachable("expected a $gp operand");
}

MBBInfo::MBBInfo(MachineDomTreeNode *N) : Node(N), HTScope(0) {}

const MachineDomTreeNode *MBBInfo::getNode() const { return Node; }

bool MBBInfo::isVisited() const { return HTScope; }

void MBBInfo::preVisit(ScopedHTType &ScopedHT) {
  HTScope = new ScopedHTType::ScopeTy(ScopedHT);
}

void MBBInfo::postVisit() { delete HTScope; }

// OptimizePICCall methods.
bool OptimizePICCall::runOnMachineFunction(MachineFunction &F) {
  if (F.getTarget().getSubtarget<MipsSubtarget>().inMips16Mode())
    return false;

  // Do a pre-order traversal of the dominator tree.
  MachineDominatorTree *MDT = &getAnalysis<MachineDominatorTree>();
  bool Changed = false;

  SmallVector<MBBInfo, 8> WorkList(1, MBBInfo(MDT->getRootNode()));

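  // Each node stays on the work list while its dominator-tree subtree is being
  // processed; the second time it is popped, postVisit() closes the hash-table
  // scope that preVisit() opened, discarding entries made in dominated blocks.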
  while (!WorkList.empty()) {
    MBBInfo &MBBI = WorkList.back();

    // If this MBB has already been visited, destroy the scope for the MBB and
    // pop it from the work list.
    if (MBBI.isVisited()) {
      MBBI.postVisit();
      WorkList.pop_back();
      continue;
    }

    // Visit the MBB and add its children to the work list.
    MBBI.preVisit(ScopedHT);
    Changed |= visitNode(MBBI);
    const MachineDomTreeNode *Node = MBBI.getNode();
    const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
    WorkList.append(Children.begin(), Children.end());
  }

  return Changed;
}

bool OptimizePICCall::visitNode(MBBInfo &MBBI) {
  bool Changed = false;
  MachineBasicBlock *MBB = MBBI.getNode()->getBlock();

  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
       ++I) {
    unsigned Reg;
    const Value *Entry;

    // Skip instructions that are not indirect calls through a register.
    if (!isCallViaRegister(*I, Reg, Entry))
      continue;

    Changed = true;
    unsigned N = getCount(Entry);

    if (N != 0) {
      // If a function has been called more than twice, we do not have to emit
      // a load instruction to get the function address from the GOT, but can
      // instead reuse the address that has been loaded before.
      if (N >= 2 && !LoadTargetFromGOT)
        getCallTargetRegOpnd(*I)->setReg(getReg(Entry));

      // Erase the $gp operand if this isn't the first time a function has
      // been called. $gp needs to be set up only if the function call can go
      // through a lazy binding stub.
      eraseGPOpnd(*I);
    }

    if (Entry)
      incCntAndSetReg(Entry, Reg);

    setCallTargetReg(MBB, I);
  }

  return Changed;
}

bool OptimizePICCall::isCallViaRegister(MachineInstr &MI, unsigned &Reg,
                                        const Value *&Val) const {
  if (!MI.isCall())
    return false;

  MachineOperand *MO = getCallTargetRegOpnd(MI);

  // Return if MI is not a function call via a register.
  if (!MO)
    return false;

  // Get the instruction that loads the function address from the GOT.
  Reg = MO->getReg();
  Val = 0;
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI = MRI.getVRegDef(Reg);

  assert(DefMI);

  // See if DefMI is an instruction that loads from a GOT entry that holds the
  // address of a lazy binding stub.
  if (!DefMI->mayLoad() || DefMI->getNumOperands() < 3)
    return true;

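  // Only loads tagged with a GOT-call relocation (MO_GOT_CALL, or MO_CALL_LO16
  // for the large-GOT sequence) can resolve to a lazy-binding stub; any other
  // load leaves Val null, so the call is still routed through $t9 but its
  // target is not tracked for reuse.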
  unsigned Flags = DefMI->getOperand(2).getTargetFlags();

  if (Flags != MipsII::MO_GOT_CALL && Flags != MipsII::MO_CALL_LO16)
    return true;

  // Return the underlying object for the GOT entry in Val.
  assert(DefMI->hasOneMemOperand());
  Val = (*DefMI->memoperands_begin())->getValue();
  return true;
}

unsigned OptimizePICCall::getCount(const Value *Entry) {
  return ScopedHT.lookup(Entry).first;
}

unsigned OptimizePICCall::getReg(const Value *Entry) {
  unsigned Reg = ScopedHT.lookup(Entry).second;
  assert(Reg);
  return Reg;
}

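// Inserting into the scoped hash table shadows any entry from an enclosing
// scope rather than overwriting it, so the incremented count and the new
// register automatically revert once the dominator subtree has been processed.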
void OptimizePICCall::incCntAndSetReg(const Value *Entry, unsigned Reg) {
  CntRegP P = ScopedHT.lookup(Entry);
  ScopedHT.insert(Entry, std::make_pair(P.first + 1, Reg));
}

/// Return an OptimizePICCall object.
FunctionPass *llvm::createMipsOptimizePICCallPass(MipsTargetMachine &TM) {
  return new OptimizePICCall(TM);
}