//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"

using namespace llvm;

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  // TargetData if available, or null.
  const TargetData *const TD;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;
  const bool AlwaysInline;

  bool IsRecursive;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
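  //
  // For example (an illustrative case, not exhaustive): if the call site
  // passes the constant 'i32 42' for an argument '%x' and the callee computes
  // '%y = add i32 %x, 1', we can record the mapping '%y -> i32 43' so that
  // later uses of '%y' fold as well.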
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
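  // For example (illustrative): a GEP adding a constant 4 bytes to a tracked
  // argument pointer '%base' would map to the pair ('%base', 4).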
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool handleSROACandidate(bool IsSROAValid,
                           DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *); void visit(Module &);
  void visit(Function *); void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitICmp(ICmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitCallSite(CallSite CS);

public:
  CallAnalyzer(const TargetData *TD, Function &Callee, int Threshold)
    : TD(TD), F(Callee), Threshold(Threshold), Cost(0),
      AlwaysInline(F.hasFnAttr(Attribute::AlwaysInline)),
      IsRecursive(false), ExposesReturnsTwice(false), HasDynamicAlloca(false),
      NumInstructions(0), NumVectorInstructions(0),
      FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
      NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
      NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
      NumInstructionsSimplified(0), SROACostSavings(0), SROACostSavingsLost(0) {
  }

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Look up the SROA-candidate argument and cost iterator which V maps
/// to. Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Helper for the common pattern of handling a SROA candidate.
/// Either accumulates the cost savings if the SROA remains valid, or disables
/// SROA for the candidate.
bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
                                       DenseMap<Value *, int>::iterator CostIt,
                                       int InstructionCost) {
  if (IsSROAValid) {
    accumulateSROACost(CostIt, InstructionCost);
    return true;
  }

  disableSROA(CostIt);
  return false;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
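///
/// For example (illustrative, assuming 64-bit pointers and 4-byte i32s):
/// 'getelementptr {i32, i32}* %p, i64 1, i32 1' accumulates
/// 1 * 8 + 4 = 12 into Offset.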
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!TD)
    return false;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = TD->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // FIXME: Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.

  // We will happily inline static alloca instructions or dynamic alloca
  // instructions in always-inline situations.
  if (AlwaysInline || I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
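  //
  // For example (illustrative, assuming 4-byte i32s): if '%arg' is already
  // known to be '%base' + 8, then '%p = getelementptr inbounds i32* %arg,
  // i64 1' folds to '%base' + 12 here.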
  if (TD && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *Ops[1] = { dyn_cast<Constant>(Operand) };
  if (Ops[0] || (Ops[0] = SimplifiedValues.lookup(Operand)))
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               Ops, TD)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::visitICmp(ICmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
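      //
      // For example (illustrative): 'icmp eq i32* %p, null' always folds to
      // false when '%p' is known to derive from an alloca in the caller.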
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
      !F.hasFnAttr(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
    switch (II->getIntrinsicID()) {
    default:
      return Base::visitCallSite(CS);

    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // SROA can usually chew through these intrinsics, but they aren't free.
      return false;
    }
  }

  if (Function *F = CS.getCalledFunction()) {
    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursive = true;
      return false;
    }

    if (!callIsSmall(CS)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(TD, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (isInstructionFree(&I, TD))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}


/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = llvm::prior(BB->end());
       I != E; ++I) {
    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (!AlwaysInline && Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
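///
/// For example (illustrative, assuming 4-byte i32s): given
/// V = 'getelementptr inbounds {i32, i32}* %a, i32 0, i32 1', this returns
/// the constant '4' and leaves V pointing at '%a'.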
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!TD || !V->getType()->isPointerTy())
    return 0;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return 0;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = TD->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Unless we are always-inlining, perform some tweaks to the cost and
  // threshold based on the direct callsite information.
  if (!AlwaysInline) {
    // We want to more aggressively inline vector-dense kernels, so up the
    // threshold, and we'll lower it if the % of vector instructions gets too
    // low.
    assert(NumInstructions == 0);
    assert(NumVectorInstructions == 0);
    FiftyPercentVectorBonus = Threshold;
    TenPercentVectorBonus = Threshold / 2;

    // Give out bonuses per argument, as the instructions setting them up will
    // be gone after inlining.
    for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
      if (TD && CS.isByValArgument(I)) {
        // We approximate the number of loads and stores needed by dividing the
        // size of the byval type by the target's pointer size.
        PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
        unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
        unsigned PointerSize = TD->getPointerSizeInBits();
        // Ceiling division.
        unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
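        // For example: a 96-bit struct copied with 64-bit words needs
        // (96 + 63) / 64 = 2 word-sized loads and stores.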

        // If it generates more than 8 stores it is likely to be expanded as an
        // inline memcpy so we take that as an upper bound. Otherwise we assume
        // one load and one store per word copied.
        // FIXME: The maxStoresPerMemcpy setting from the target should be used
        // here instead of a magic number of 8, but it's not available via
        // TargetData.
        NumStores = std::min(NumStores, 8U);

        Cost -= 2 * NumStores * InlineConstants::InstrCost;
      } else {
        // For non-byval arguments subtract off one instruction per call
        // argument.
        Cost -= InlineConstants::InstrCost;
      }
    }

    // If there is only one call of the function, and it has internal linkage,
    // the cost of inlining it drops dramatically.
    if (F.hasLocalLinkage() && F.hasOneUse() && &F == CS.getCalledFunction())
      Cost += InlineConstants::LastCallToStaticBonus;

    // If the instruction after the call, or the normal destination of the
    // invoke, is an unreachable instruction, the function is noreturn. As
    // such, there is little point in inlining this unless there is literally
    // zero cost.
    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      if (isa<UnreachableInst>(II->getNormalDest()->begin()))
        Threshold = 1;
    } else if (isa<UnreachableInst>(++BasicBlock::iterator(CS.getInstruction())))
      Threshold = 1;

    // If this function uses the coldcc calling convention, prefer not to
    // inline it.
    if (F.getCallingConv() == CallingConv::Cold)
      Cost += InlineConstants::ColdccPenalty;

    // Check if we're done. This can happen due to bonuses and penalties.
    if (Cost > Threshold)
      return false;
  }

  if (F.empty())
    return true;

  // Track whether we've seen a return instruction. The first return
  // instruction is free, as at least one will usually disappear in inlining.
  bool HasReturn = false;

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, prioritizing for small iterations because we exit after
  // crossing our threshold, we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (!AlwaysInline && Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Handle the terminator cost here where we can track returns and other
    // function-wide constructs.
    TerminatorInst *TI = BB->getTerminator();

    // We never want to inline functions that contain an indirectbr. Inlining
    // such a function would be incorrect because all the blockaddress's (in
    // static global initializers for example) would still refer to the
    // original function, and the indirect jump would jump from the inlined
    // copy of the function into the original function, which is extremely
    // undefined behavior.
    // FIXME: This logic isn't really right; we can safely inline functions
    // with indirectbr's as long as no other function or global references the
    // blockaddress of a block within the current function. And as a QOI issue,
    // if someone is using a blockaddress without an indirectbr, and that
    // reference somehow ends up in another function or global, we probably
    // don't want to inline this function.
    if (isa<IndirectBrInst>(TI))
      return false;

    if (!HasReturn && isa<ReturnInst>(TI))
      HasReturn = true;
    else
      Cost += InlineConstants::InstrCost;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail on out.
    if (!analyzeBlock(BB)) {
      if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
        return false;
      break;
    }

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  Threshold += VectorBonus;

  return AlwaysInline || Cost < Threshold;
}

#ifndef NDEBUG
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
#undef DEBUG_PRINT_STAT
}
#endif

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (!Callee || Callee->mayBeOverridden() ||
      Callee->hasFnAttr(Attribute::NoInline) || CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName() << "...\n");

  CallAnalyzer CA(TD, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}