//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(false),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSet<LandingPadInst*, 16> &InlinedLPads);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                               SmallPtrSet<LandingPadInst*, 16> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return;
  }
}
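
// As an illustrative sketch of the rewrite above (hypothetical IR; the value
// and label names are made up), a potentially-throwing call such as
//   %r = call i32 @g(i32 %x)
// in an inlined block becomes
//   %r = invoke i32 @g(i32 %x)
//           to label %r.noexc unwind label %caller.lpad
// where %r.noexc is the split-off continuation block and %caller.lpad is the
// invoke's outer resume destination in the caller.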

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (SmallPtrSet<LandingPadInst*, 16>::iterator I = InlinedLPads.begin(),
         E = InlinedLPads.end(); I != E; ++I) {
    LandingPadInst *InlinedLPad = *I;
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// CloneAliasScopeMetadata - When inlining a function that contains noalias
/// scope metadata, this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Value *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<MDNode *, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingVH<MDNode> > MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    MDNode *Dummy = MDNode::getTemporary(CalledFunc->getContext(),
                                         ArrayRef<Value*>());
    DummyNodes.push_back(Dummy);
    MDMap[*I] = Dummy;
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Value *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Value *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Value *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps),
           *TempM = MDMap[*I];

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope))
      NI->setMetadata(LLVMContext::MD_alias_scope, MDMap[M]);

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias))
      NI->setMetadata(LLVMContext::MD_noalias, MDMap[M]);
  }

  // Now that everything has been replaced, delete the dummy nodes.
  for (unsigned i = 0, ie = DummyNodes.size(); i != ie; ++i)
    MDNode::deleteTemporary(DummyNodes[i]);
}
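
// Illustrative sketch (hypothetical metadata numbering): if the callee's
// instructions carry !alias.scope !1 and !noalias !2, and the callee is
// inlined into two different call sites, each inlined body receives freshly
// cloned copies of !1 and !2 (and of any nodes they reference), so alias
// analysis can still tell the two inlined copies apart.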

/// AddAliasScopeMetadata - If the inlined function has noalias arguments, then
/// add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout *DL) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
       E = CalledFunc->arg_end(); I != E; ++I) {
    if (I->hasNoAliasAttr() && !I->hasNUses(0))
      NoAliasArgs.push_back(I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
        PtrArgs.push_back(MI->getRawDest());
        if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
          PtrArgs.push_back(MTI->getRawSource());
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // Simplification during cloning could make this happen; skip these
      // cases for now.
      if (PtrArgs.empty())
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Value *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* MaxLookup = */ 0);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false;
      for (const Value *V : ObjSet)
        if (!isIdentifiedFunctionLocal(const_cast<Value*>(V))) {
          CanDeriveViaCapture = true;
          break;
        }

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 A->hasNoCaptureAttr() ||
                                 !PointerMayBeCapturedBefore(A,
                                   /* ReturnCaptures */ false,
                                   /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
          NI->getMetadata(LLVMContext::MD_noalias),
          MDNode::get(CalledFunc->getContext(), NoAliases)));
      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if:
      //   1. The noalias argument is in the set of underlying objects, or
      //   2. There is some non-noalias argument in our list and the noalias
      //      argument has been captured.

      for (const Argument *A : NoAliasArgs) {
        if (ObjSet.count(A) || (CanDeriveViaCapture &&
                                PointerMayBeCapturedBefore(A,
                                  /* ReturnCaptures */ false,
                                  /* StoreCaptures */ false,
                                  I, &DT)))
          Scopes.push_back(NewScopes[A]);
      }

      if (!Scopes.empty())
        NI->setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
          NI->getMetadata(LLVMContext::MD_alias_scope),
          MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}
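
// Illustrative sketch (hypothetical IR): for a callee such as
//   define void @f(float* noalias %a, float* %b) { ... }
// inlining under -enable-noalias-to-md-conversion creates a fresh scope for
// %a; inlined accesses based on %a are tagged with !alias.scope naming that
// scope, while inlined accesses provably not based on %a are tagged with
// !noalias for it.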

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  LLVMContext &Context = Src->getContext();
  Type *VoidPtrTy = Type::getInt8PtrTy(Context);
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  Type *Tys[3] = { VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context) };
  Function *MemCpyFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);
  IRBuilder<> builder(InsertBlock->begin());
  Value *DstCast = builder.CreateBitCast(Dst, VoidPtrTy, "tmp");
  Value *SrcCast = builder.CreateBitCast(Src, VoidPtrTy, "tmp");

  Value *Size;
  if (IFI.DL == nullptr)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.DL->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DstCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  builder.CreateCall(MemCpyFn, CallArgs);
}
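
// A rough sketch of what the helper above emits (hypothetical IR; value names
// are made up): when a DataLayout is available, the initialization becomes a
// fixed-size, align-1, non-volatile copy such as
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst.cast, i8* %src.cast,
//                                        i64 <store size>, i32 1, i1 false)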

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.DL) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = 1;
  if (IFI.DL)
    Align = IFI.DL->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}
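
// For reference, the markers being looked for take roughly this form in the
// IR (hypothetical values and sizes):
//   %p = bitcast i32* %a to i8*
//   call void @llvm.lifetime.start(i64 4, i8* %p)
//   ...
//   call void @llvm.lifetime.end(i64 4, i8* %p)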

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update InlinedAtEntry of a DebugLoc.
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}

/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
729static void fixupLineNumbers(Function *Fn, Function::iterator FI,
Eric Christopher2b40fdf2012-03-26 19:09:40 +0000730 Instruction *TheCall) {
Devang Patel35797402011-07-08 18:01:31 +0000731 DebugLoc TheCallDL = TheCall->getDebugLoc();
732 if (TheCallDL.isUnknown())
733 return;
734
735 for (; FI != Fn->end(); ++FI) {
736 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
737 BI != BE; ++BI) {
738 DebugLoc DL = BI->getDebugLoc();
Evgeniy Stepanov2be29922014-06-09 09:09:19 +0000739 if (DL.isUnknown()) {
740 // If the inlined instruction has no line number, make it look as if it
741 // originates from the call location. This is important for
742 // ((__always_inline__, __nodebug__)) functions which must use caller
743 // location for all instructions in their function body.
744 BI->setDebugLoc(TheCallDL);
745 } else {
Devang Patel35797402011-07-08 18:01:31 +0000746 BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
Devang Patelbb23a4a2011-08-10 21:50:54 +0000747 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
748 LLVMContext &Ctx = BI->getContext();
749 MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
750 DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
751 InlinedAt, Ctx));
752 }
753 }
Devang Patel35797402011-07-08 18:01:31 +0000754 }
755 }
756}
757
Reid Klecknerf0915aa2014-05-15 20:11:28 +0000758/// Returns a musttail call instruction if one immediately precedes the given
759/// return instruction with an optional bitcast instruction between them.
760static CallInst *getPrecedingMustTailCall(ReturnInst *RI) {
761 Instruction *Prev = RI->getPrevNode();
762 if (!Prev)
763 return nullptr;
764
765 if (Value *RV = RI->getReturnValue()) {
766 if (RV != Prev)
767 return nullptr;
768
769 // Look through the optional bitcast.
770 if (auto *BI = dyn_cast<BitCastInst>(Prev)) {
771 RV = BI->getOperand(0);
772 Prev = BI->getPrevNode();
773 if (!Prev || RV != Prev)
774 return nullptr;
775 }
776 }
777
778 if (auto *CI = dyn_cast<CallInst>(Prev)) {
779 if (CI->isMustTailCall())
780 return CI;
781 }
782 return nullptr;
783}
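
// The pattern matched above looks roughly like this (hypothetical IR), with
// the bitcast being optional:
//   %t = musttail call i8* @callee(i8* %p)
//   %r = bitcast i8* %t to i32*
//   ret i32* %r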

/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||               // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = nullptr;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        //       supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy implied
885 // by them explicit. However, we don't do this if the callee is readonly
886 // or readnone, because the copy would be unneeded: the callee doesn't
887 // modify the struct.
Nick Lewycky612d70b2011-11-20 19:09:04 +0000888 if (CS.isByValArgument(ArgNo)) {
David Majnemer120f4a02013-11-03 12:22:13 +0000889 ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
Chris Lattner00997442010-12-20 07:57:41 +0000890 CalledFunc->getParamAlignment(ArgNo+1));
Reid Kleckner9b2cc642014-04-21 20:48:47 +0000891 if (ActualArg != *AI)
Julien Lerouge957e91c2014-04-15 18:01:54 +0000892 ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
Chris Lattner908117b2008-01-11 06:09:30 +0000893 }
Duncan Sands7c8fb1a2008-09-05 12:37:12 +0000894
Devang Patelb8f11de2010-06-23 23:55:51 +0000895 VMap[I] = ActualArg;
Chris Lattner908117b2008-01-11 06:09:30 +0000896 }
Misha Brukmanb1c93172005-04-21 23:48:37 +0000897
Chris Lattnerbe853d72006-05-27 01:28:04 +0000898 // We want the inliner to prune the code as it copies. We would LOVE to
899 // have no dead or constant instructions leftover after inlining occurs
900 // (which can happen, e.g., because an argument was constant), but we'll be
901 // happy with whatever the cloner can do.
Dan Gohmanca26f792010-08-26 15:41:53 +0000902 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
903 /*ModuleLevelChanges=*/false, Returns, ".i",
Rafael Espindola7c68beb2014-02-18 15:33:12 +0000904 &InlinedFunctionInfo, IFI.DL, TheCall);
Duncan Sands7c8fb1a2008-09-05 12:37:12 +0000905
Chris Lattner5de3b8b2006-07-12 18:29:36 +0000906 // Remember the first block that is newly cloned over.
907 FirstNewBlock = LastBlock; ++FirstNewBlock;
Duncan Sands7c8fb1a2008-09-05 12:37:12 +0000908
Julien Lerouge957e91c2014-04-15 18:01:54 +0000909 // Inject byval arguments initialization.
910 for (std::pair<Value*, Value*> &Init : ByValInit)
911 HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
912 FirstNewBlock, IFI);
913
Chris Lattner5de3b8b2006-07-12 18:29:36 +0000914 // Update the callgraph if requested.
Chris Lattner4ba01ec2010-04-22 23:07:58 +0000915 if (IFI.CG)
Devang Patelb8f11de2010-06-23 23:55:51 +0000916 UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
Devang Patel35797402011-07-08 18:01:31 +0000917
918 // Update inlined instructions' line number information.
919 fixupLineNumbers(Caller, FirstNewBlock, TheCall);
Hal Finkel94146652014-07-24 14:25:39 +0000920
921 // Clone existing noalias metadata if necessary.
922 CloneAliasScopeMetadata(CS, VMap);
Hal Finkelff0bcb62014-07-25 15:50:08 +0000923
924 // Add noalias metadata if necessary.
925 AddAliasScopeMetadata(CS, VMap, IFI.DL);
Misha Brukmanb1c93172005-04-21 23:48:37 +0000926 }
Duncan Sands7c8fb1a2008-09-05 12:37:12 +0000927
Chris Lattner530d4bf2003-05-29 15:11:31 +0000928 // If there are any alloca instructions in the block that used to be the entry
929 // block for the callee, move them to the entry block of the caller. First
930 // calculate which instruction they should be inserted before. We insert the
931 // instructions at the end of the current alloca list.
Chris Lattner257492c2006-01-13 18:16:48 +0000932 {
Chris Lattner0cc265e2003-08-24 06:59:16 +0000933 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
Chris Lattner18ef3fd2004-02-04 02:51:48 +0000934 for (BasicBlock::iterator I = FirstNewBlock->begin(),
Chris Lattner5eef6ad2009-08-27 03:51:50 +0000935 E = FirstNewBlock->end(); I != E; ) {
936 AllocaInst *AI = dyn_cast<AllocaInst>(I++);
Craig Topperf40110f2014-04-25 05:29:35 +0000937 if (!AI) continue;
Chris Lattner5eef6ad2009-08-27 03:51:50 +0000938
939 // If the alloca is now dead, remove it. This often occurs due to code
940 // specialization.
941 if (AI->use_empty()) {
942 AI->eraseFromParent();
943 continue;
Chris Lattner6ef6d062006-09-13 19:23:57 +0000944 }
Chris Lattner5eef6ad2009-08-27 03:51:50 +0000945
946 if (!isa<Constant>(AI->getArraySize()))
947 continue;
948
Chris Lattnercd3af962010-12-06 07:43:04 +0000949 // Keep track of the static allocas that we inline into the caller.
Chris Lattner4ba01ec2010-04-22 23:07:58 +0000950 IFI.StaticAllocas.push_back(AI);
Chris Lattnerb1cba3f2009-08-27 04:20:52 +0000951
Chris Lattner5eef6ad2009-08-27 03:51:50 +0000952 // Scan for the block of allocas that we can move over, and move them
953 // all at once.
954 while (isa<AllocaInst>(I) &&
Chris Lattnerb1cba3f2009-08-27 04:20:52 +0000955 isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
Chris Lattner4ba01ec2010-04-22 23:07:58 +0000956 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
Chris Lattner5eef6ad2009-08-27 03:51:50 +0000957 ++I;
Chris Lattnerb1cba3f2009-08-27 04:20:52 +0000958 }
Chris Lattner5eef6ad2009-08-27 03:51:50 +0000959
960 // Transfer all of the allocas over in a block. Using splice means
961 // that the instructions aren't removed from the symbol table, then
962 // reinserted.
963 Caller->getEntryBlock().getInstList().splice(InsertPoint,
964 FirstNewBlock->getInstList(),
965 AI, I);
966 }
Chris Lattner0cc265e2003-08-24 06:59:16 +0000967 }
Chris Lattner530d4bf2003-05-29 15:11:31 +0000968
Reid Klecknerf0915aa2014-05-15 20:11:28 +0000969 bool InlinedMustTailCalls = false;
970 if (InlinedFunctionInfo.ContainsCalls) {
Reid Kleckner6af21242014-05-15 20:39:42 +0000971 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
972 if (CallInst *CI = dyn_cast<CallInst>(TheCall))
973 CallSiteTailKind = CI->getTailCallKind();
974
Reid Klecknerf0915aa2014-05-15 20:11:28 +0000975 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
976 ++BB) {
977 for (Instruction &I : *BB) {
978 CallInst *CI = dyn_cast<CallInst>(&I);
979 if (!CI)
980 continue;
981
982 // We need to reduce the strength of any inlined tail calls. For
983 // musttail, we have to avoid introducing potential unbounded stack
984 // growth. For example, if functions 'f' and 'g' are mutually recursive
985 // with musttail, we can inline 'g' into 'f' so long as we preserve
986 // musttail on the cloned call to 'f'. If either the inlined call site
987 // or the cloned call site is *not* musttail, the program already has
988 // one frame of stack growth, so it's safe to remove musttail. Here is
989 // a table of example transformations:
990 //
991 // f -> musttail g -> musttail f ==> f -> musttail f
992 // f -> musttail g -> tail f ==> f -> tail f
993 // f -> g -> musttail f ==> f -> f
994 // f -> g -> tail f ==> f -> f
995 CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
996 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
997 CI->setTailCallKind(ChildTCK);
998 InlinedMustTailCalls |= CI->isMustTailCall();
999
1000 // Calls inlined through a 'nounwind' call site should be marked
1001 // 'nounwind'.
1002 if (MarkNoUnwind)
1003 CI->setDoesNotThrow();
1004 }
1005 }
1006 }
1007
Nick Lewyckya68ec832011-05-22 05:22:10 +00001008 // Leave lifetime markers for the static alloca's, scoping them to the
1009 // function we just inlined.
Chad Rosier07d37bc2012-02-25 02:56:01 +00001010 if (InsertLifetime && !IFI.StaticAllocas.empty()) {
Nick Lewyckya68ec832011-05-22 05:22:10 +00001011 IRBuilder<> builder(FirstNewBlock->begin());
1012 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
1013 AllocaInst *AI = IFI.StaticAllocas[ai];
1014
1015 // If the alloca is already scoped to something smaller than the whole
1016 // function then there's no need to add redundant, less accurate markers.
1017 if (hasLifetimeMarkers(AI))
1018 continue;
1019
Alexey Samsonovcfd662f2012-11-13 07:15:32 +00001020 // Try to determine the size of the allocation.
Craig Topperf40110f2014-04-25 05:29:35 +00001021 ConstantInt *AllocaSize = nullptr;
Alexey Samsonovcfd662f2012-11-13 07:15:32 +00001022 if (ConstantInt *AIArraySize =
1023 dyn_cast<ConstantInt>(AI->getArraySize())) {
Rafael Espindola7c68beb2014-02-18 15:33:12 +00001024 if (IFI.DL) {
Alexey Samsonovcfd662f2012-11-13 07:15:32 +00001025 Type *AllocaType = AI->getAllocatedType();
Rafael Espindola7c68beb2014-02-18 15:33:12 +00001026 uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
Alexey Samsonovcfd662f2012-11-13 07:15:32 +00001027 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
1028 assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
1029 // Check that array size doesn't saturate uint64_t and doesn't
1030 // overflow when it's multiplied by type size.
1031 if (AllocaArraySize != ~0ULL &&
1032 UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
1033 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
1034 AllocaArraySize * AllocaTypeSize);
1035 }
1036 }
1037 }
1038
1039 builder.CreateLifetimeStart(AI, AllocaSize);
Reid Kleckner900d46f2014-05-15 21:10:46 +00001040 for (ReturnInst *RI : Returns) {
1041 // Don't insert llvm.lifetime.end calls between a musttail call and a
1042 // return. The return kills all local allocas.
1043 if (InlinedMustTailCalls && getPrecedingMustTailCall(RI))
1044 continue;
Reid Klecknerf0915aa2014-05-15 20:11:28 +00001045 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
Reid Kleckner900d46f2014-05-15 21:10:46 +00001046 }
Nick Lewyckya68ec832011-05-22 05:22:10 +00001047 }
1048 }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && getPrecedingMustTailCall(RI))
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }
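
  // Rough shape of the result above (illustration only; names invented):
  //
  //   %savedstack = call i8* @llvm.stacksave()
  //     ...inlined code containing dynamic allocas...
  //   call void @llvm.stackrestore(i8* %savedstack)
  //
  // so stack space used by the callee's dynamic allocas is reclaimed on every
  // return path that is not immediately preceded by a musttail call.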

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail = getPrecedingMustTailCall(RI);
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }
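
  // For illustration (types and names invented): if the caller returns i8* and
  // a cloned musttail call produces i32*, the pair
  //
  //   %r = musttail call i32* @callee(...)
  //   ret i32* %r
  //
  // is rewritten above to
  //
  //   %r = musttail call i32* @callee(...)
  //   %c = bitcast i32* %r to i8*
  //   ret i8* %c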

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());
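
  // The caller now looks roughly like this (block names are illustrative
  // only):
  //
  //   OrigBB:          ; code up to the call site, ending in a branch to the
  //                    ; inlined entry block
  //   <inlined blocks> ; the cloned callee body, just spliced into place
  //   AfterCallBB:     ; the code that originally followed the call/invoke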

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }
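
    // Illustrative result (names invented): with two inlined returns the merge
    // block begins with
    //
    //   AfterCallBB:
    //     %res = phi <RTy> [ %rv0, %ret.bb0 ], [ %rv1, %ret.bb1 ]
    //
    // and every former user of the call/invoke now reads %res instead.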

    // Add a branch to the merge point and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the callee's entry block into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
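  // For example, if every inlined return produced the same value, the PHI
  // simplifies to that value and is dropped here rather than left for later
  // passes to clean up.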
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.DL)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}