//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/ParameterAttributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, CallGraph *CG, const TargetData *TD) {
  return InlineFunction(CallSite(CI), CG, TD);
}
bool llvm::InlineFunction(InvokeInst *II, CallGraph *CG, const TargetData *TD) {
  return InlineFunction(CallSite(II), CG, TD);
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
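///
/// As a sketch (hypothetical IR; @g, %v and %lpad are illustrative only), a
/// may-throw call inlined through an invoke whose unwind edge goes to %lpad
/// is rewritten from
///
///   %v = call i32 @g(i32 %x)
///
/// into
///
///   %v = invoke i32 @g(i32 %x) to label %v.noexc unwind label %lpad
///
/// and any inlined 'unwind' instruction becomes 'br label %lpad'.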
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  std::vector<Value*> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  if (InlinedCodeInfo.ContainsCalls || InlinedCodeInfo.ContainsUnwinds) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      if (InlinedCodeInfo.ContainsCalls) {
        for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ){
          Instruction *I = BBI++;

          // We only need to check for function calls: inlined invoke
          // instructions require no special handling.
          if (!isa<CallInst>(I)) continue;
          CallInst *CI = cast<CallInst>(I);

          // If this call cannot unwind, don't convert it to an invoke.
          if (CI->doesNotThrow())
            continue;

          // Convert this function call into an invoke instruction.
          // First, split the basic block.
          BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

          // Next, create the new invoke instruction, inserting it at the end
          // of the old basic block.
          SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
          InvokeInst *II =
            new InvokeInst(CI->getCalledValue(), Split, InvokeDest,
                           InvokeArgs.begin(), InvokeArgs.end(),
                           CI->getName(), BB->getTerminator());
          II->setCallingConv(CI->getCallingConv());
          II->setParamAttrs(CI->getParamAttrs());

          // Make sure that anything using the call now uses the invoke!
          CI->replaceAllUsesWith(II);

          // Delete the unconditional branch inserted by splitBasicBlock
          BB->getInstList().pop_back();
          Split->getInstList().pop_front();  // Delete the original call

          // Update any PHI nodes in the exceptional block to indicate that
          // there is now a new entry in them.
          unsigned i = 0;
          for (BasicBlock::iterator I = InvokeDest->begin();
               isa<PHINode>(I); ++I, ++i) {
            PHINode *PN = cast<PHINode>(I);
            PN->addIncoming(InvokeDestPHIValues[i], BB);
          }

          // This basic block is now complete, start scanning the next one.
          break;
        }
      }

      if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
        // An UnwindInst requires special handling when it gets inlined into an
        // invoke site.  Once this happens, we know that the unwind would cause
        // a control transfer to the invoke exception destination, so we can
        // transform it into a direct branch to the exception destination.
        new BranchInst(InvokeDest, UI);

        // Delete the unwind instruction!
        UI->getParent()->getInstList().pop_back();

        // Update any PHI nodes in the exceptional block to indicate that
        // there is now a new entry in them.
        unsigned i = 0;
        for (BasicBlock::iterator I = InvokeDest->begin();
             isa<PHINode>(I); ++I, ++i) {
          PHINode *PN = cast<PHINode>(I);
          PN->addIncoming(InvokeDestPHIValues[i], BB);
        }
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph will remain.
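///
/// As a sketch: if the graph had edges Caller -> Callee and Callee -> F
/// before inlining, the Caller -> Callee edge for this site is removed and,
/// for each call to F that survived cloning, an edge Caller -> F is added.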
static void UpdateCallGraphAfterInlining(const Function *Caller,
                                         const Function *Callee,
                                         Function::iterator FirstNewBlock,
                                         DenseMap<const Value*, Value*> &ValueMap,
                                         CallGraph &CG) {
  // Update the call graph by deleting the edge from Callee to Caller
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];
  CallerNode->removeCallEdgeTo(CalleeNode);

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  for (CallGraphNode::iterator I = CalleeNode->begin(),
       E = CalleeNode->end(); I != E; ++I) {
    const Instruction *OrigCall = I->first.getInstruction();

    DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI != ValueMap.end() && VMI->second) {
      // If the call was inlined, but then constant folded, there is no edge to
      // add.  Check for this case.
      if (Instruction *NewCall = dyn_cast<Instruction>(VMI->second))
        CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
    }
  }
}


// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline
// this call.  The program is still in a well defined state if this occurs
// though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
//
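// As a sketch (hypothetical IR; @callee, %r and %a are illustrative only):
// inlining
//
//   %r = call i32 @callee(i32 %a)
//
// splices the cloned body of @callee in place of the call, and all uses of
// %r are rewritten to use the callee's (possibly PHI-merged) return value.
//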
bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||           // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;


  // If the call to the callee is a non-tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    isa<CallInst>(TheCall) && !cast<CallInst>(TheCall)->isTailCall();

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasCollector()) {
    if (!Caller->hasCollector())
      Caller->setCollector(CalledFunc->getCollector());
    else if (CalledFunc->getCollector() != Caller->getCollector())
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  //
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  std::vector<ReturnInst*> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy ValueMap after cloning.
    DenseMap<const Value*, Value*> ValueMap;

    assert(std::distance(CalledFunc->arg_begin(), CalledFunc->arg_end()) ==
           std::distance(CS.arg_begin(), CS.arg_end()) &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal arguments to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are inlined, we need to make the copy implied
      // by them explicit.  It is tempting to think that this is not needed if
      // the callee is readonly, because the callee doesn't modify the struct.
      // However this would be wrong: readonly means that any writes the callee
      // performs are not visible to the caller.  But writes by the callee to
      // an argument passed byval are by definition not visible to the caller!
      // Since we allow this kind of readonly function, there needs to be an
      // explicit copy in order to keep the writes invisible after inlining.
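      //
      // As a sketch (hypothetical IR; %struct.S, %s, %dst, %src and %size are
      // illustrative only), inlining
      //
      //   call void @f(%struct.S* byval %s)
      //
      // behaves as if the caller had written
      //
      //   %s.copy = alloca %struct.S
      //   call void @llvm.memcpy.i64(i8* %dst, i8* %src, i64 %size, i32 1)
      //   ; ...body of @f, with uses of its argument rewritten to %s.copy...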
      if (CalledFunc->paramHasAttr(ArgNo+1, ParamAttr::ByVal)) {
        const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
        const Type *VoidPtrTy = PointerType::getUnqual(Type::Int8Ty);

        // Create the alloca.  If we have TargetData, use nice alignment.
        unsigned Align = 1;
        if (TD) Align = TD->getPrefTypeAlignment(AggTy);
        Value *NewAlloca = new AllocaInst(AggTy, 0, Align, I->getName(),
                                          Caller->begin()->begin());
        // Emit a memcpy.
        Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                       Intrinsic::memcpy_i64);
        Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
        Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

        Value *Size;
        if (TD == 0)
          Size = ConstantExpr::getSizeOf(AggTy);
        else
          Size = ConstantInt::get(Type::Int64Ty, TD->getTypeStoreSize(AggTy));

        // Always generate a memcpy of alignment 1 here because we don't know
        // the alignment of the src pointer.  Other optimizations can infer
        // better alignment.
        Value *CallArgs[] = {
          DestCast, SrcCast, Size, ConstantInt::get(Type::Int32Ty, 1)
        };
        CallInst *TheMemCpy =
          new CallInst(MemCpyFn, CallArgs, CallArgs+4, "", TheCall);

        // If we have a call graph, update it.
        if (CG) {
          CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
          CallGraphNode *CallerNode = (*CG)[Caller];
          CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
        }

        // Uses of the argument in the function should use our new alloca
        // instead.
        ActualArg = NewAlloca;
      }

      ValueMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
                              &InlinedFunctionInfo, TD);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (CG)
      UpdateCallGraphAfterInlining(Caller, CalledFunc, FirstNewBlock, ValueMap,
                                   *CG);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  //
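  // As a sketch: a fixed-size 'alloca i32' cloned from the callee's entry
  // block is spliced up into the caller's entry block, since only entry-block
  // allocas are treated as static stack slots.
  //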
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; )
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I++)) {
        // If the alloca is now dead, remove it.  This often occurs due to code
        // specialization.
        if (AI->use_empty()) {
          AI->eraseFromParent();
          continue;
        }

        if (isa<Constant>(AI->getArraySize())) {
          // Scan for the block of allocas that we can move over, and move them
          // all at once.
          while (isa<AllocaInst>(I) &&
                 isa<Constant>(cast<AllocaInst>(I)->getArraySize()))
            ++I;

          // Transfer all of the allocas over in a block.  Using splice means
          // that the instructions aren't removed from the symbol table, then
          // reinserted.
          Caller->getEntryBlock().getInstList().splice(
              InsertPoint,
              FirstNewBlock->getInstList(),
              AI, I);
        }
      }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
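  //
  // As a sketch (hypothetical IR; %sp and %n are illustrative only):
  //
  //   %sp = call i8* @llvm.stacksave()
  //   ...inlined code containing 'alloca i32, i32 %n'...
  //   call void @llvm.stackrestore(i8* %sp)   ; before each return/unwind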
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    const Type *BytePtr = PointerType::getUnqual(Type::Int8Ty);
    // Get the two intrinsics we care about.
    Constant *StackSave, *StackRestore;
    StackSave = M->getOrInsertFunction("llvm.stacksave", BytePtr, NULL);
    StackRestore = M->getOrInsertFunction("llvm.stackrestore", Type::VoidTy,
                                          BytePtr, NULL);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CG) {
      // We know that StackSave/StackRestore are Function*'s, because they are
      // intrinsics which must have the right types.
      StackSaveCGN = CG->getOrInsertFunction(cast<Function>(StackSave));
      StackRestoreCGN = CG->getOrInsertFunction(cast<Function>(StackRestore));
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = new CallInst(StackSave, "savedstack",
                                      FirstNewBlock->begin());
    if (CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = new CallInst(StackRestore, SavedPtr, "", Returns[i]);
      if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          new CallInst(StackRestore, SavedPtr, "", UI);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined
  // 'unwind' instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
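  //
  // As a sketch (hypothetical IR): for a single-block callee whose body is
  //
  //   %y = add i32 %x, 1
  //   ret i32 %y
  //
  // the 'add' is spliced in directly before the call, uses of the call are
  // replaced by %y, and both the call and the 'ret' are then deleted.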
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      new BranchInst(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty())
      TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());

    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->getParent()->getInstList().erase(TheCall);

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->getParent()->getInstList().erase(Returns[0]);

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = new BranchInst(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  //
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    //
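    // As a sketch (hypothetical IR; %bb1/%bb2 and %v1/%v2 are illustrative
    // only), with returns arriving from %bb1 and %bb2 the merge block gets
    //
    //   %r = phi i32 [ %v1, %bb1 ], [ %v2, %bb2 ]
    //
    // and each 'ret' below is rewritten into a branch to the merge block.
    //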
    PHINode *PHI = 0;
    if (!TheCall->use_empty()) {
      PHI = new PHINode(CalledFunc->getReturnType(),
                        TheCall->getName(), AfterCallBB->begin());

      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      //
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions, turning them into unconditional
    // branches to the merge point now, and adding entries to the PHI node as
    // appropriate.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];

      if (PHI) {
        assert(RI->getReturnValue() && "Ret should have value!");
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }

      // Add a branch to the merge point where the PHI node lives if it exists.
      new BranchInst(AfterCallBB, RI);

      // Delete the return instruction now
      RI->getParent()->getInstList().erase(RI);
    }

  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty())
      TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  return true;
}