//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/InlineCost.h"
#include "llvm/Support/CallSite.h"
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"

using namespace llvm;

// CountCodeReductionForConstant - Figure out an approximation for how many
// instructions will be constant folded if the specified value is constant.
//
unsigned InlineCostAnalyzer::FunctionInfo::
         CountCodeReductionForConstant(Value *V) {
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
    if (isa<BranchInst>(*UI))
      Reduction += 40;          // Eliminating a conditional branch is a big win
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(*UI))
      // Eliminating a switch is a big win, proportional to the number of edges
      // deleted.
      Reduction += (SI->getNumSuccessors()-1) * 40;
    else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
      // Turning an indirect call into a direct call is a BIG win
      Reduction += CI->getCalledValue() == V ? 500 : 0;
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
      // Turning an indirect call into a direct call is a BIG win
      Reduction += II->getCalledValue() == V ? 500 : 0;
    } else {
      // Figure out if this instruction will be removed due to simple constant
      // propagation.
      Instruction &Inst = cast<Instruction>(**UI);
      bool AllOperandsConstant = true;
      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
          AllOperandsConstant = false;
          break;
        }

      if (AllOperandsConstant) {
        // We will get to remove this instruction...
        Reduction += 7;

        // And any other instructions that use it which become constants
        // themselves.
        Reduction += CountCodeReductionForConstant(&Inst);
      }
    }

  return Reduction;
}

// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
// the function will be if it is inlined into a context where an argument
// becomes an alloca.
//
unsigned InlineCostAnalyzer::FunctionInfo::
         CountCodeReductionForAlloca(Value *V) {
  if (!isa<PointerType>(V->getType())) return 0;  // Not a pointer
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI) {
    Instruction *I = cast<Instruction>(*UI);
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      Reduction += 10;
    else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      // If the GEP has variable indices, we won't be able to do much with it.
      for (Instruction::op_iterator I = GEP->op_begin()+1, E = GEP->op_end();
           I != E; ++I)
        if (!isa<Constant>(*I)) return 0;
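      // A constant-index GEP of the alloca is expected to fold away, together
      // with whatever uses its result, so count its users recursively.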
      Reduction += CountCodeReductionForAlloca(GEP)+15;
    } else {
      // If there is some other strange instruction, we're not going to be able
      // to do much if we inline this.
      return 0;
    }
  }

  return Reduction;
}

/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
  unsigned NumInsts = 0, NumBlocks = 0, NumVectorInsts = 0;

  // Look at the size of the callee, counting the basic blocks and the
  // instructions that are likely to survive inlining.
  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
    for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
         II != E; ++II) {
      if (isa<DbgInfoIntrinsic>(II)) continue;  // Debug intrinsics don't count.
      if (isa<PHINode>(II)) continue;           // PHI nodes don't count.

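      // Track vector instructions separately; getInlineFudgeFactor uses this
      // count to inline vector-heavy callees more aggressively.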
      if (isa<ExtractElementInst>(II) || isa<VectorType>(II->getType()))
        ++NumVectorInsts;

      // Noop casts, including ptr <-> int, don't count.
      if (const CastInst *CI = dyn_cast<CastInst>(II)) {
        if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
            isa<PtrToIntInst>(CI))
          continue;
      } else if (const GetElementPtrInst *GEPI =
                   dyn_cast<GetElementPtrInst>(II)) {
        // If a GEP has all constant indices, it will probably be folded with
        // a load/store.
        bool AllConstant = true;
        for (unsigned i = 1, e = GEPI->getNumOperands(); i != e; ++i)
          if (!isa<ConstantInt>(GEPI->getOperand(i))) {
            AllConstant = false;
            break;
          }
        if (AllConstant) continue;
      }

      ++NumInsts;
    }

    ++NumBlocks;
  }

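  // Cache the totals in this FunctionInfo so getInlineCost and
  // getInlineFudgeFactor can reuse them for every call site of this function.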
  this->NumBlocks = NumBlocks;
  this->NumInsts = NumInsts;
  this->NumVectorInsts = NumVectorInsts;

  // Check out all of the arguments to the function, figuring out how much
  // code can be eliminated if one of the arguments is a constant.
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
    ArgumentWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
                                      CountCodeReductionForAlloca(I)));
}


// getInlineCost - The heuristic used to determine if we should inline the
// function call or not.
//
int InlineCostAnalyzer::getInlineCost(CallSite CS,
                               SmallPtrSet<const Function *, 16> &NeverInline) {
  Instruction *TheCall = CS.getInstruction();
  Function *Callee = CS.getCalledFunction();
  const Function *Caller = TheCall->getParent()->getParent();

  // Don't inline a directly recursive call.
  if (Caller == Callee ||
      // Don't inline functions which can be redefined at link-time to mean
      // something else.  Link-once linkage is ok though.
      Callee->hasWeakLinkage() ||

      // Don't inline functions marked noinline.
      NeverInline.count(Callee))
    return 2000000000;  // An effectively infinite cost: never inline.

  // InlineCost - This value measures how good an inline candidate this call
  // site is.  A lower inline cost makes it more likely for the call to be
  // inlined.  This value may go negative.
  //
  int InlineCost = 0;

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
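  // (The intuition: once its single call is inlined, the internal function is
  // dead and its body can typically be deleted, so code size should not grow.)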
  //
  if (Callee->hasInternalLinkage() && Callee->hasOneUse())
    InlineCost -= 15000;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    InlineCost += 2000;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn.  As such,
  // there is little point in inlining this.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      InlineCost += 10000;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    InlineCost += 10000;

  // Get information about the callee...
  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
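  // (NumBlocks is zero only for a freshly default-constructed cache entry;
  // every analyzed function body has at least an entry block.)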
  if (CalleeFI.NumBlocks == 0)
    CalleeFI.analyzeFunction(Callee);

  // Add to the inline quality for properties that make the call valuable to
  // inline.  This includes factors that indicate that the result of inlining
  // the function will be optimizable.  Currently this just looks at arguments
  // passed into the function.
  //
  unsigned ArgNo = 0;
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I, ++ArgNo) {
    // Each argument passed in has a cost at both the caller and the callee
    // sides.  This favors functions that take many arguments over functions
    // that take few arguments.
    InlineCost -= 20;

    // If this is a function being passed in, it is very likely that we will be
    // able to turn an indirect function call into a direct function call.
    if (isa<Function>(I))
      InlineCost -= 100;

    // If an alloca is passed in, inlining this function is likely to allow
    // significant future optimization possibilities (like scalar promotion and
    // scalarization), so encourage the inlining of the function.
    //
    else if (isa<AllocaInst>(I)) {
      if (ArgNo < CalleeFI.ArgumentWeights.size())
        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;

      // If this is a constant being passed into the function, use the argument
      // weights calculated for the callee to determine how much will be folded
      // away with this information.
    } else if (isa<Constant>(I)) {
      if (ArgNo < CalleeFI.ArgumentWeights.size())
        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].ConstantWeight;
    }
  }

  // Now that we have considered all of the factors that make the call site more
  // likely to be inlined, look at factors that make us not want to inline it.

  // Don't inline into something too big, which would make it bigger.
  //
  InlineCost += Caller->size()/15;

  // Look at the size of the callee.  Each instruction counts as 5.
  InlineCost += CalleeFI.NumInsts*5;

  return InlineCost;
}

// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
// higher threshold to determine if the function call should be inlined.
float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
  Function *Callee = CS.getCalledFunction();

  // Get information about the callee...
  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI.NumBlocks == 0)
    CalleeFI.analyzeFunction(Callee);

  float Factor = 1.0f;
  // Single BB functions are often written to be inlined.
  if (CalleeFI.NumBlocks == 1)
    Factor += 0.5f;

  // Be more aggressive if the function contains a good chunk of vector
  // instructions (at least 10% of all instructions).
  if (CalleeFI.NumVectorInsts > CalleeFI.NumInsts/2)
    Factor += 2.0f;
  else if (CalleeFI.NumVectorInsts > CalleeFI.NumInsts/10)
    Factor += 1.5f;
  return Factor;
}