//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/GlobalAlias.h"
#include "llvm/DataLayout.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"

using namespace llvm;

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  // DataLayout if available, or null.
  const DataLayout *const TD;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;
  const bool AlwaysInline;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;
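  // For example, a GEP over a pointer argument with all-constant indices maps
  // here to the underlying argument plus the accumulated byte offset; see
  // visitGetElementPtr and accumulateGEPOffset below.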

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool handleSROACandidate(bool IsSROAValid,
                           DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *); void visit(Module &);
  void visit(Function *); void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitICmp(ICmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitCallSite(CallSite CS);

public:
  CallAnalyzer(const DataLayout *TD, Function &Callee, int Threshold)
    : TD(TD), F(Callee), Threshold(Threshold), Cost(0),
      AlwaysInline(F.getFnAttributes().hasAttribute(Attributes::AlwaysInline)),
      IsCallerRecursive(false), IsRecursiveCall(false),
      ExposesReturnsTwice(false), HasDynamicAlloca(false), AllocatedSize(0),
      NumInstructions(0), NumVectorInstructions(0),
      FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
      NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
      NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
      NumInstructionsSimplified(0), SROACostSavings(0), SROACostSavingsLost(0) {
  }

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }
  bool isAlwaysInline() { return AlwaysInline; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace
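
// A sketch of how this analyzer is driven (mirroring getInlineCost at the
// bottom of this file): construct a CallAnalyzer for the callee and a
// threshold, run analyzeCall on the call site, then inspect the results:
//
//   CallAnalyzer CA(TD, *Callee, Threshold);
//   bool ShouldInline = CA.analyzeCall(CS);
//   // CA.getCost() and CA.getThreshold() now describe this call site.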

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Helper for the common pattern of handling a SROA candidate.
/// Either accumulates the cost savings if the SROA remains valid, or disables
/// SROA for the candidate.
bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
                                       DenseMap<Value *, int>::iterator CostIt,
                                       int InstructionCost) {
  if (IsSROAValid) {
    accumulateSROACost(CostIt, InstructionCost);
    return true;
  }

  disableSROA(CostIt);
  return false;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!TD)
    return false;

  unsigned AS = GEP.getPointerAddressSpace();
  unsigned IntPtrWidth = TD->getPointerSizeInBits(AS);
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = TD->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
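
// For example, assuming the default data layout, a GEP such as
// "getelementptr inbounds {i32, i32}* %p, i32 0, i32 1" accumulates a
// constant offset of 4 bytes here: the leading zero index contributes
// nothing, and the struct index contributes its field offset.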

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // FIXME: Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
                      Ty->getPrimitiveSizeInBits());
  }

  // We will happily inline static alloca instructions or dynamic alloca
  // instructions in always-inline situations.
  if (AlwaysInline || I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (TD && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and they disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, as we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  unsigned AS = I.getPointerAddressSpace();
  if (TD && IntegerSize >= TD->getPointerSizeInBits(AS)) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  unsigned AS = I.getAddressSpace();
  if (TD && IntegerSize <= TD->getPointerSizeInBits(AS)) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  if (Constant *COp = dyn_cast<Constant>(I.getOperand(0)))
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *Ops[1] = { dyn_cast<Constant>(Operand) };
  if (Ops[0] || (Ops[0] = SimplifiedValues.lookup(Operand)))
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               Ops, TD)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::visitICmp(ICmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }
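
  // For example, "icmp eq i8* %p, null" where %p is known to derive from a
  // caller-side alloca folds to false here, since the address of an alloca is
  // never null.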

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
      !F.getFnAttributes().hasAttribute(Attributes::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
    switch (II->getIntrinsicID()) {
    default:
      return Base::visitCallSite(CS);

    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // SROA can usually chew through these intrinsics, but they aren't free.
      return false;
    }
  }

  if (Function *F = CS.getCalledFunction()) {
    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (!callIsSmall(CS)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(TD, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }
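
  // As a worked example (assuming IndirectCallThreshold is, say, 100): if the
  // pretend-inline of the known target comes back with a cost of 40, the line
  // above credits max(0, 100 - 40) = 60 back against this call site's cost.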

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (isInstructionFree(&I, TD))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}


/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = llvm::prior(BB->end());
       I != E; ++I) {
    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (!AlwaysInline && Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!TD || !V->getType()->isPointerTy())
    return 0;

  unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();
  unsigned IntPtrWidth = TD->getPointerSizeInBits(AS);
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return 0;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = TD->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
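
// For example, given an inbounds GEP with constant indices (0, 2) over a
// global of type [4 x i32], wrapped in a bitcast, this strips down to the
// global itself and returns a constant offset of 8 bytes (two i32 elements,
// assuming the default data layout).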

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Unless we are always-inlining, perform some tweaks to the cost and
  // threshold based on the direct callsite information.
  if (!AlwaysInline) {
    // We want to more aggressively inline vector-dense kernels, so up the
    // threshold, and we'll lower it if the % of vector instructions gets too
    // low.
    assert(NumInstructions == 0);
    assert(NumVectorInstructions == 0);
    FiftyPercentVectorBonus = Threshold;
    TenPercentVectorBonus = Threshold / 2;

    // Give out bonuses per argument, as the instructions setting them up will
    // be gone after inlining.
    for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
      if (TD && CS.isByValArgument(I)) {
        // We approximate the number of loads and stores needed by dividing the
        // size of the byval type by the target's pointer size.
        PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
        unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
        unsigned AS = PTy->getAddressSpace();
        unsigned PointerSize = TD->getPointerSizeInBits(AS);
        // Ceiling division.
        unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

        // If it generates more than 8 stores it is likely to be expanded as an
        // inline memcpy so we take that as an upper bound. Otherwise we assume
        // one load and one store per word copied.
        // FIXME: The maxStoresPerMemcpy setting from the target should be used
        // here instead of a magic number of 8, but it's not available via
        // DataLayout.
        NumStores = std::min(NumStores, 8U);

        Cost -= 2 * NumStores * InlineConstants::InstrCost;
      } else {
        // For non-byval arguments subtract off one instruction per call
        // argument.
        Cost -= InlineConstants::InstrCost;
      }
    }
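
    // As a worked example: a 512-bit byval struct with 64-bit pointers gives
    // NumStores = ceil(512 / 64) = 8, so the byval case above subtracts
    // 2 * 8 * InlineConstants::InstrCost from the cost.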

    // If there is only one call of the function, and it has internal linkage,
    // the cost of inlining it drops dramatically.
    if (F.hasLocalLinkage() && F.hasOneUse() && &F == CS.getCalledFunction())
      Cost += InlineConstants::LastCallToStaticBonus;

    // If the instruction after the call, or the normal destination of the
    // invoke, is an unreachable instruction, the function is noreturn. As
    // such, there is little point in inlining this unless there is literally
    // zero cost.
    Instruction *Instr = CS.getInstruction();
    if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
      if (isa<UnreachableInst>(II->getNormalDest()->begin()))
        Threshold = 1;
    } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
      Threshold = 1;

    // If this function uses the coldcc calling convention, prefer not to
    // inline it.
    if (F.getCallingConv() == CallingConv::Cold)
      Cost += InlineConstants::ColdccPenalty;

    // Check if we're done. This can happen due to bonuses and penalties.
    if (Cost > Threshold)
      return false;
  }

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
       U != E; ++U) {
    CallSite Site(cast<Value>(*U));
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Track whether we've seen a return instruction. The first return
  // instruction is free, as at least one will usually disappear in inlining.
  bool HasReturn = false;

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this we use a small-size optimized SetVector, optimizing for
  // the common case of few iterations since we exit once we cross our
  // threshold.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size; this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (!AlwaysInline && Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Handle the terminator cost here where we can track returns and other
    // function-wide constructs.
    TerminatorInst *TI = BB->getTerminator();

    // We never want to inline functions that contain an indirectbr. This is
    // incorrect because all the blockaddresses (in static global initializers
    // for example) would be referring to the original function, and this
    // indirect jump would jump from the inlined copy of the function into the
    // original function which is extremely undefined behavior.
    // FIXME: This logic isn't really right; we can safely inline functions
    // with indirectbr's as long as no other function or global references the
    // blockaddress of a block within the current function. And as a QOI issue,
    // if someone is using a blockaddress without an indirectbr, and that
    // reference somehow ends up in another function or global, we probably
    // don't want to inline this function.
    if (isa<IndirectBrInst>(TI))
      return false;

    if (!HasReturn && isa<ReturnInst>(TI))
      HasReturn = true;
    else
      Cost += InlineConstants::InstrCost;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would
      // increase the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  Threshold += VectorBonus;

  return AlwaysInline || Cost < Threshold;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << "      " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
#undef DEBUG_PRINT_STAT
}
#endif

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (!Callee || Callee->mayBeOverridden() ||
      Callee->getFnAttributes().hasAttribute(Attributes::NoInline) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
        << "...\n");

  CallAnalyzer CA(TD, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && (CA.isAlwaysInline() ||
                       CA.getCost() >= CA.getThreshold()))
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
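
// A sketch of typical use from an inliner pass (assuming an
// InlineCostAnalyzer ICA and a call site CS; the default inline threshold in
// LLVM of this vintage is 225, but any threshold can be passed):
//
//   InlineCost IC = ICA.getInlineCost(CS, 225);
//   if (IC.isAlways()) {
//     // Inlining is forced (e.g. alwaysinline).
//   } else if (IC.isNever()) {
//     // Inlining is forbidden (e.g. noinline, returns_twice exposure).
//   } else {
//     // Weigh IC.getCost() against a caller-specific threshold.
//   }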