//===- InlineSimple.cpp - Code to perform simple function inlining --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements bottom-up inlining of functions into their callers.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline"
#include "llvm/CallingConv.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/InlinerPass.h"
#include <set>

using namespace llvm;

namespace {
  struct VISIBILITY_HIDDEN ArgInfo {
    unsigned ConstantWeight;
    unsigned AllocaWeight;

    ArgInfo(unsigned CWeight, unsigned AWeight)
      : ConstantWeight(CWeight), AllocaWeight(AWeight) {}
  };

  // FunctionInfo - For each function, calculate the size of it in blocks and
  // instructions.
  struct VISIBILITY_HIDDEN FunctionInfo {
    // NumInsts, NumBlocks - Keep track of how large each function is, which is
    // used to estimate the code size cost of inlining it.
    unsigned NumInsts, NumBlocks;

    // ArgumentWeights - Each formal argument of the function is inspected to
    // see if it is used in any contexts where making it a constant or alloca
    // would reduce the code size.  If so, we add some value to the argument
    // entry here.
    std::vector<ArgInfo> ArgumentWeights;

    FunctionInfo() : NumInsts(0), NumBlocks(0) {}

    /// analyzeFunction - Fill in the current structure with information gleaned
    /// from the specified function.
    void analyzeFunction(Function *F);
  };

  class VISIBILITY_HIDDEN SimpleInliner : public Inliner {
    std::map<const Function*, FunctionInfo> CachedFunctionInfo;
    std::set<const Function*> NeverInline; // Functions that are never inlined
  public:
    SimpleInliner() : Inliner(&ID) {}
    static char ID; // Pass identification, replacement for typeid
    int getInlineCost(CallSite CS);
    virtual bool doInitialization(CallGraph &CG);
  };
  char SimpleInliner::ID = 0;
  RegisterPass<SimpleInliner> X("inline", "Function Integration/Inlining");
}

Pass *llvm::createFunctionInliningPass() { return new SimpleInliner(); }
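
// The RegisterPass<> above makes this pass available under the name "inline",
// so besides being created programmatically via createFunctionInliningPass(),
// it can typically be run from the command line with something like
// `opt -inline input.bc -o output.bc` (an illustrative invocation; the file
// names are placeholders).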

// CountCodeReductionForConstant - Figure out an approximation for how many
// instructions will be constant folded if the specified value is constant.
//
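// As a rough worked illustration of the weights used below (the scenario is
// hypothetical, the numbers are the heuristic's own): a value used as the
// condition of one conditional branch (40), as the callee of one indirect
// call (500), and as an operand of one instruction whose remaining operands
// are already constant (7, plus whatever that instruction's users then fold
// away) yields an estimated reduction of at least 40 + 500 + 7 = 547.
//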
static unsigned CountCodeReductionForConstant(Value *V) {
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI)
    if (isa<BranchInst>(*UI))
      Reduction += 40;  // Eliminating a conditional branch is a big win.
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(*UI))
      // Eliminating a switch is a big win, proportional to the number of edges
      // deleted.
      Reduction += (SI->getNumSuccessors()-1) * 40;
    else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
      // Turning an indirect call into a direct call is a BIG win.
      Reduction += CI->getCalledValue() == V ? 500 : 0;
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
      // Turning an indirect call into a direct call is a BIG win.
      Reduction += II->getCalledValue() == V ? 500 : 0;
    } else {
      // Figure out if this instruction will be removed due to simple constant
      // propagation.
      Instruction &Inst = cast<Instruction>(**UI);
      bool AllOperandsConstant = true;
      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
          AllOperandsConstant = false;
          break;
        }

      if (AllOperandsConstant) {
        // We will get to remove this instruction...
        Reduction += 7;

        // And any other instructions that use it which become constants
        // themselves.
        Reduction += CountCodeReductionForConstant(&Inst);
      }
    }

  return Reduction;
}

// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
// the function will be if it is inlined into a context where an argument
// becomes an alloca.
//
static unsigned CountCodeReductionForAlloca(Value *V) {
  if (!isa<PointerType>(V->getType())) return 0;  // Not a pointer.
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    Instruction *I = cast<Instruction>(*UI);
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      Reduction += 10;
    else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      // If the GEP has variable indices, we won't be able to do much with it.
      for (Instruction::op_iterator OI = GEP->op_begin()+1, OE = GEP->op_end();
           OI != OE; ++OI)
        if (!isa<Constant>(*OI)) return 0;
      Reduction += CountCodeReductionForAlloca(GEP)+15;
    } else {
      // If there is some other strange instruction, we're not going to be able
      // to do much if we inline this.
      return 0;
    }
  }

  return Reduction;
}

/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
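/// As a small illustration of the counting rules below: a basic block holding
/// one llvm.dbg intrinsic, one no-op bitcast, one all-constant-index GEP, and
/// three other instructions contributes 3 to NumInsts and 1 to NumBlocks.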
void FunctionInfo::analyzeFunction(Function *F) {
  unsigned NumInsts = 0, NumBlocks = 0;

  // Look at the size of the callee: count its basic blocks and the
  // instructions likely to remain after simple folding.  The per-instruction
  // and per-block weights are applied later, in getInlineCost.
  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
    for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
         II != E; ++II) {
      if (isa<DbgInfoIntrinsic>(II)) continue;  // Debug intrinsics don't count.

      // Noop casts, including ptr <-> int, don't count.
      if (const CastInst *CI = dyn_cast<CastInst>(II)) {
        if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
            isa<PtrToIntInst>(CI))
          continue;
      } else if (const GetElementPtrInst *GEPI =
                 dyn_cast<GetElementPtrInst>(II)) {
        // If a GEP has all constant indices, it will probably be folded with
        // a load/store.
        bool AllConstant = true;
        for (unsigned i = 1, e = GEPI->getNumOperands(); i != e; ++i)
          if (!isa<ConstantInt>(GEPI->getOperand(i))) {
            AllConstant = false;
            break;
          }
        if (AllConstant) continue;
      }

      ++NumInsts;
    }

    ++NumBlocks;
  }

  this->NumBlocks = NumBlocks;
  this->NumInsts  = NumInsts;

  // Check out all of the arguments to the function, figuring out how much
  // code can be eliminated if one of the arguments is a constant.
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
    ArgumentWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
                                      CountCodeReductionForAlloca(I)));
}


// getInlineCost - The heuristic used to determine if we should inline the
// function call or not.
//
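// A rough worked sketch (hypothetical call site, weights taken from the code
// below): a callee with 10 counted instructions and 2 basic blocks, a single
// constant argument whose ConstantWeight is 47, and a caller with 40 basic
// blocks comes out to roughly -20 (per-argument bonus) - 47 (constant weight)
// + 40/20 (caller size penalty) + 10*5 + 2*20 (callee size) = 25.  Lower is
// better; the Inliner base class weighs this value against its inlining
// threshold.
//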
int SimpleInliner::getInlineCost(CallSite CS) {
  Instruction *TheCall = CS.getInstruction();
  Function *Callee = CS.getCalledFunction();
  const Function *Caller = TheCall->getParent()->getParent();

  // Don't inline a directly recursive call.
  if (Caller == Callee ||
      // Don't inline functions which can be redefined at link-time to mean
      // something else.  link-once linkage is ok though.
      Callee->hasWeakLinkage() ||

      // Don't inline functions marked noinline.
      NeverInline.count(Callee))
    return 2000000000;

  // InlineCost - This value measures how good of an inline candidate this call
  // site is.  A lower inline cost makes it more likely for the call to be
  // inlined.  This value may go negative.
  //
  int InlineCost = 0;

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
  //
  if (Callee->hasInternalLinkage() && Callee->hasOneUse())
    InlineCost -= 30000;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    InlineCost += 2000;

  // If the instruction following the call, or the first instruction of the
  // invoke's normal destination, is an unreachable instruction, the callee
  // never returns to this point.  As such, there is little point in inlining
  // it.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      InlineCost += 10000;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    InlineCost += 10000;

  // Get information about the callee...
  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI.NumBlocks == 0)
    CalleeFI.analyzeFunction(Callee);

  // Add to the inline quality for properties that make the call valuable to
  // inline.  This includes factors that indicate that the result of inlining
  // the function will be optimizable.  Currently this just looks at arguments
  // passed into the function.
  //
  unsigned ArgNo = 0;
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I, ++ArgNo) {
    // Each argument passed in has a cost at both the caller and the callee
    // sides.  This favors functions that take many arguments over functions
    // that take few arguments.
    InlineCost -= 20;

    // If this is a function being passed in, it is very likely that we will be
    // able to turn an indirect function call into a direct function call.
    if (isa<Function>(I))
      InlineCost -= 100;

    // If an alloca is passed in, inlining this function is likely to allow
    // significant future optimization possibilities (like scalar promotion and
    // scalarization), so encourage the inlining of the function.
    //
    else if (isa<AllocaInst>(I)) {
      if (ArgNo < CalleeFI.ArgumentWeights.size())
        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;

      // If this is a constant being passed into the function, use the argument
      // weights calculated for the callee to determine how much will be folded
      // away with this information.
    } else if (isa<Constant>(I)) {
      if (ArgNo < CalleeFI.ArgumentWeights.size())
        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].ConstantWeight;
    }
  }

  // Now that we have considered all of the factors that make the call site
  // more likely to be inlined, look at factors that make us not want to
  // inline it.

  // Don't inline into a caller that is already very large, which would only
  // make it bigger; every 20 basic blocks in the caller add one unit of cost.
  //
  InlineCost += Caller->size()/20;

  // Look at the size of the callee.  Each basic block counts as 20 units, and
  // each instruction counts as 5.
  InlineCost += CalleeFI.NumInsts*5 + CalleeFI.NumBlocks*20;
  return InlineCost;
}

// doInitialization - Initializes the set of functions annotated as noinline,
// as recorded in the llvm.noinline global.
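// For illustration only (an assumed shape, inferred from the parsing code
// below rather than taken from this file): the initializer is expected to be
// a constant array of function pointers, possibly wrapped in bitcast constant
// expressions, along the lines of
//   @llvm.noinline = appending global [1 x i8*]
//                    [ i8* bitcast (void ()* @foo to i8*) ]
// where @foo is a placeholder for a function that should never be inlined.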
bool SimpleInliner::doInitialization(CallGraph &CG) {
  Module &M = CG.getModule();

  // Get llvm.noinline.
  GlobalVariable *GV = M.getNamedGlobal("llvm.noinline");
  if (GV == 0)
    return false;

  // A declaration with no initializer has nothing to tell us.
  if (!GV->hasInitializer())
    return false;

  const ConstantArray *InitList =
    dyn_cast<ConstantArray>(GV->getInitializer());
  if (InitList == 0)
    return false;

  // Iterate over each element and add it to the NeverInline set.
  for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
    // The element may be a direct function pointer or a bitcast of one.
    const Constant *Elt = InitList->getOperand(i);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Elt))
      if (CE->getOpcode() == Instruction::BitCast)
        Elt = CE->getOperand(0);

    // Insert into the set of functions to never inline.
    if (const Function *F = dyn_cast<Function>(Elt))
      NeverInline.insert(F);
  }

  return false;
}