//===- PlaceSafepoints.cpp - Place GC Safepoints --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Place garbage collection safepoints at appropriate locations in the IR. This
// does not make relocation semantics or variable liveness explicit. That's
// done by RewriteStatepointsForGC.
//
// Terminology:
// - A call is said to be "parseable" if there is a stack map generated for the
// return PC of the call. A runtime can determine where values listed in the
// deopt arguments and (after RewriteStatepointsForGC) gc arguments are located
// on the stack when the code is suspended inside such a call. Every parse
// point is represented by a call wrapped in a gc.statepoint intrinsic.
// - A "poll" is an explicit check in the generated code to determine if the
// runtime needs the generated code to cooperate by calling a helper routine
// and thus suspending its execution at a known state. The call to the helper
// routine will be parseable. The (gc & runtime specific) logic of a poll is
// assumed to be provided in a function of the name "gc.safepoint_poll".
//
// We aim to insert polls such that running code can quickly be brought to a
// well-defined state for inspection by the collector. In the current
// implementation, this is done via the insertion of poll sites at method entry
// and the backedge of most loops. We try to avoid inserting more polls than
// are necessary to ensure a finite period between poll sites. This is not
// because the poll itself is expensive in the generated code; it's not. Polls
// do tend to impact the optimizer itself in negative ways; we'd like to avoid
// perturbing the optimization of the method as much as we can.
//
// We also need to make most call sites parseable. The callee might execute a
// poll (or otherwise be inspected by the GC). If so, the entire stack
// (including the suspended frame of the current method) must be parseable.
//
// This pass will insert:
// - Call parse points ("call safepoints") for any call which may need to
// reach a safepoint during the execution of the callee function.
// - Backedge safepoint polls and entry safepoint polls to ensure that
// executing code reaches a safepoint poll in a finite amount of time.
//
// We do not currently support return statepoints, but adding them would not
// be hard. They are not required for correctness - entry safepoints are an
// alternative - but some GCs may prefer them. Patches welcome.
//
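// As an illustrative sketch only (the exact body is GC and runtime specific),
// a minimal "gc.safepoint_poll" might simply forward to an assumed runtime
// helper, here called @do_safepoint:
//
//   declare void @do_safepoint()
//
//   define void @gc.safepoint_poll() {
//     call void @do_safepoint()
//     ret void
//   }
//
// A production runtime would typically guard the helper call with a check of
// a "safepoint requested" flag so that the fast path stays cheap.
//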
//===----------------------------------------------------------------------===//

#include "llvm/Pass.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"

#define DEBUG_TYPE "safepoint-placement"
STATISTIC(NumEntrySafepoints, "Number of entry safepoints inserted");
STATISTIC(NumCallSafepoints, "Number of call safepoints inserted");
STATISTIC(NumBackedgeSafepoints, "Number of backedge safepoints inserted");

STATISTIC(CallInLoop, "Number of loops w/o safepoints due to calls in loop");
STATISTIC(FiniteExecution,
          "Number of loops w/o safepoints due to finite execution");

using namespace llvm;

// Ignore opportunities to avoid placing safepoints on backedges, useful for
// validation
static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden,
                                  cl::init(false));

/// If true, do not place backedge safepoints in counted loops.
static cl::opt<bool> SkipCounted("spp-counted", cl::Hidden, cl::init(true));

// If true, split the backedge of a loop when placing the safepoint, otherwise
// split the latch block itself. Both are useful for experimentation, but in
// practice splitting the backedge appears to optimize better.
static cl::opt<bool> SplitBackedge("spp-split-backedge", cl::Hidden,
                                   cl::init(false));

// Print tracing output
static cl::opt<bool> TraceLSP("spp-trace", cl::Hidden, cl::init(false));
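
// These flags are intended for testing and debugging. A hypothetical
// invocation through the legacy pass manager might look like:
//   opt -S -place-safepoints -spp-all-backedges -spp-trace input.ll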

namespace {

/// An analysis pass whose purpose is to identify each of the backedges in
/// the function which require a safepoint poll to be inserted.
struct PlaceBackedgeSafepointsImpl : public FunctionPass {
  static char ID;

  /// The output of the pass - gives a list of each backedge (described by
  /// pointing at the branch) which needs a poll inserted.
  std::vector<TerminatorInst *> PollLocations;

  /// True unless we're running spp-no-call, in which case we need to disable
  /// the call-dependent placement opts.
  bool CallSafepointsEnabled;

  ScalarEvolution *SE = nullptr;
  DominatorTree *DT = nullptr;
  LoopInfo *LI = nullptr;

  PlaceBackedgeSafepointsImpl(bool CallSafepoints = false)
      : FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) {
    initializePlaceBackedgeSafepointsImplPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *);
  void runOnLoopAndSubLoops(Loop *L) {
    // Visit all the subloops
    for (auto I = L->begin(), E = L->end(); I != E; I++)
      runOnLoopAndSubLoops(*I);
    runOnLoop(L);
  }

  bool runOnFunction(Function &F) override {
    SE = &getAnalysis<ScalarEvolution>();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    for (auto I = LI->begin(), E = LI->end(); I != E; I++) {
      runOnLoopAndSubLoops(*I);
    }
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<LoopInfoWrapperPass>();
    // We no longer modify the IR at all in this pass. Thus all
    // analyses are preserved.
    AU.setPreservesAll();
  }
};
}

static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
static cl::opt<bool> NoBackedge("spp-no-backedge", cl::Hidden, cl::init(false));

namespace {
struct PlaceSafepoints : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid

  PlaceSafepoints() : FunctionPass(ID) {
    initializePlaceSafepointsPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We modify the graph wholesale (inlining, block insertion, etc.). We
    // preserve nothing at the moment. We could potentially preserve the dom
    // tree if that were worth doing.
  }
};
}

// Insert a safepoint poll immediately before the given instruction. Does
// not handle the parsability of state at the runtime call; that's the
// caller's job.
static void
InsertSafepointPoll(Instruction *InsertBefore,
                    std::vector<CallSite> &ParsePointsNeeded /*rval*/);

static bool isGCLeafFunction(const CallSite &CS);

static bool needsStatepoint(const CallSite &CS) {
  if (isGCLeafFunction(CS))
    return false;
  if (CS.isCall()) {
    CallInst *call = cast<CallInst>(CS.getInstruction());
    if (call->isInlineAsm())
      return false;
  }
  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS)) {
    return false;
  }
  return true;
}

static Value *ReplaceWithStatepoint(const CallSite &CS, Pass *P);

/// Returns true if this loop is known to contain a call safepoint which
/// must unconditionally execute on any iteration of the loop which returns
/// to the loop header via an edge from Pred. Returns a conservatively
/// correct answer; i.e. false is always valid.
static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
                                               BasicBlock *Pred,
                                               DominatorTree &DT) {
  // In general, we're looking for any cut of the graph which ensures
  // there's a call safepoint along every edge between Header and Pred.
  // For the moment, we look only for the 'cuts' that consist of a single call
  // instruction in a block which is dominated by the Header and dominates the
  // loop latch (Pred) block. Somewhat surprisingly, walking the entire chain
  // of such dominating blocks gets substantially more occurrences than just
  // checking the Pred and Header blocks themselves. This may be due to the
  // density of loop exit conditions caused by range and null checks.
  // TODO: structure this as an analysis pass, cache the result for subloops,
  // avoid dom tree recalculations
  assert(DT.dominates(Header, Pred) && "loop latch not dominated by header?");

  BasicBlock *Current = Pred;
  while (true) {
    for (Instruction &I : *Current) {
      if (auto CS = CallSite(&I))
        // Note: Technically, needing a safepoint isn't quite the right
        // condition here. We should instead be checking if the target method
        // has an unconditional poll. In practice, this is only a theoretical
        // concern since we don't have any methods with conditional-only
        // safepoint polls.
        if (needsStatepoint(CS))
          return true;
    }

    if (Current == Header)
      break;
    Current = DT.getNode(Current)->getIDom()->getBlock();
  }

  return false;
}

/// Returns true if this loop is known to terminate in a finite number of
/// iterations. Note that this function may return false for a loop which
/// does actually terminate in a finite constant number of iterations due to
/// conservatism in the analysis.
static bool mustBeFiniteCountedLoop(Loop *L, ScalarEvolution *SE,
                                    BasicBlock *Pred) {
  // Only used when SkipCounted is off
  const unsigned upperTripBound = 8192;

  // A conservative bound on the loop as a whole.
  const SCEV *MaxTrips = SE->getMaxBackedgeTakenCount(L);
  if (MaxTrips != SE->getCouldNotCompute()) {
    if (SE->getUnsignedRange(MaxTrips).getUnsignedMax().ult(upperTripBound))
      return true;
    if (SkipCounted &&
        SE->getUnsignedRange(MaxTrips).getUnsignedMax().isIntN(32))
      return true;
  }

  // If this is a conditional branch to the header with the alternate path
  // being outside the loop, we can ask questions about the execution frequency
  // of the exit block.
  if (L->isLoopExiting(Pred)) {
    // This returns an exact expression only. TODO: We really only need an
    // upper bound here, but SE doesn't expose that.
    const SCEV *MaxExec = SE->getExitCount(L, Pred);
    if (MaxExec != SE->getCouldNotCompute()) {
      if (SE->getUnsignedRange(MaxExec).getUnsignedMax().ult(upperTripBound))
        return true;
      if (SkipCounted &&
          SE->getUnsignedRange(MaxExec).getUnsignedMax().isIntN(32))
        return true;
    }
  }

  return /* not finite */ false;
}

static void scanOneBB(Instruction *start, Instruction *end,
                      std::vector<CallInst *> &calls,
                      std::set<BasicBlock *> &seen,
                      std::vector<BasicBlock *> &worklist) {
  for (BasicBlock::iterator itr(start);
       itr != start->getParent()->end() && itr != BasicBlock::iterator(end);
       itr++) {
    if (CallInst *CI = dyn_cast<CallInst>(&*itr)) {
      calls.push_back(CI);
    }
    // FIXME: This code does not handle invokes
    assert(!dyn_cast<InvokeInst>(&*itr) &&
           "support for invokes in poll code needed");
    // Only add the successor blocks if we reach the terminator instruction
    // without encountering end first
    if (itr->isTerminator()) {
      BasicBlock *BB = itr->getParent();
      for (BasicBlock *Succ : successors(BB)) {
        if (seen.count(Succ) == 0) {
          worklist.push_back(Succ);
          seen.insert(Succ);
        }
      }
    }
  }
}

static void scanInlinedCode(Instruction *start, Instruction *end,
                            std::vector<CallInst *> &calls,
                            std::set<BasicBlock *> &seen) {
  calls.clear();
  std::vector<BasicBlock *> worklist;
  seen.insert(start->getParent());
  scanOneBB(start, end, calls, seen, worklist);
  while (!worklist.empty()) {
    BasicBlock *BB = worklist.back();
    worklist.pop_back();
    scanOneBB(&*BB->begin(), end, calls, seen, worklist);
  }
}

bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
  // Loop through all loop latches (branches controlling backedges). We need
  // to place a safepoint on every backedge (potentially).
  // Note: In common usage, there will be only one edge due to LoopSimplify
  // having run sometime earlier in the pipeline, but this code must be correct
  // w.r.t. loops with multiple backedges.
  BasicBlock *header = L->getHeader();
  SmallVector<BasicBlock *, 16> LoopLatches;
  L->getLoopLatches(LoopLatches);
  for (BasicBlock *pred : LoopLatches) {
    assert(L->contains(pred));

    // Make a policy decision about whether this loop needs a safepoint or
    // not. Note that this is about unburdening the optimizer in loops, not
    // avoiding the runtime cost of the actual safepoint.
    if (!AllBackedges) {
      if (mustBeFiniteCountedLoop(L, SE, pred)) {
        if (TraceLSP)
          errs() << "skipping safepoint placement in finite loop\n";
        FiniteExecution++;
        continue;
      }
      if (CallSafepointsEnabled &&
          containsUnconditionalCallSafepoint(L, header, pred, *DT)) {
        // Note: This is only semantically legal since we won't do any further
        // IPO or inlining before the actual call insertion. If we did, we
        // might later lose this call safepoint.
        if (TraceLSP)
          errs() << "skipping safepoint placement due to unconditional call\n";
        CallInLoop++;
        continue;
      }
    }

    // TODO: We can create an inner loop which runs a finite number of
    // iterations with an outer loop which contains a safepoint. This would
    // not help runtime performance that much, but it might help our ability to
    // optimize the inner loop.

    // Safepoint insertion would involve creating a new basic block (as the
    // target of the current backedge) which does the safepoint (of all live
    // variables) and branches to the true header.
    TerminatorInst *term = pred->getTerminator();

    if (TraceLSP) {
      errs() << "[LSP] terminator instruction: ";
      term->dump();
    }

    PollLocations.push_back(term);
  }

  return false;
}

/// Returns true if an entry safepoint is not required before this callsite in
/// the caller function.
static bool doesNotRequireEntrySafepointBefore(const CallSite &CS) {
  Instruction *Inst = CS.getInstruction();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_gc_statepoint:
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      // These can wrap an actual call which may grow the stack by an
      // unbounded amount or run forever.
      return false;
    default:
      // Most LLVM intrinsics are things which do not expand to actual calls,
      // or at least if they do, are leaf functions that cause only finite
      // stack growth. In particular, the optimizer likes to form things like
      // memsets out of stores in the original IR. Another important example
      // is llvm.frameescape which must occur in the entry block. Inserting a
      // safepoint before it is not legal since it could push the frameescape
      // out of the entry block.
      return true;
    }
  }
  return false;
}

static Instruction *findLocationForEntrySafepoint(Function &F,
                                                  DominatorTree &DT) {

  // Conceptually, this poll needs to be on method entry, but in
  // practice, we place it as late in the entry block as possible. We
  // can place it as late as we want as long as it dominates all calls
  // that can grow the stack. This, combined with backedge polls,
  // gives us all the progress guarantees we need.

  // hasNextInstruction and nextInstruction are used to iterate
  // through a "straight line" execution sequence.

  auto hasNextInstruction = [](Instruction *I) {
    if (!I->isTerminator()) {
      return true;
    }
    BasicBlock *nextBB = I->getParent()->getUniqueSuccessor();
    return nextBB && (nextBB->getUniquePredecessor() != nullptr);
  };

  auto nextInstruction = [&hasNextInstruction](Instruction *I) {
    assert(hasNextInstruction(I) &&
           "first check if there is a next instruction!");
    if (I->isTerminator()) {
      return I->getParent()->getUniqueSuccessor()->begin();
    } else {
      return std::next(BasicBlock::iterator(I));
    }
  };

  Instruction *cursor = nullptr;
  for (cursor = F.getEntryBlock().begin(); hasNextInstruction(cursor);
       cursor = nextInstruction(cursor)) {

    // We need to ensure a safepoint poll occurs before any 'real' call. The
    // easiest way to ensure finite execution between safepoints in the face of
    // recursive and mutually recursive functions is to enforce that each take
    // a safepoint. Additionally, we need to ensure a poll before any call
    // which can grow the stack by an unbounded amount. This isn't required
    // for GC semantics per se, but is a common requirement for languages
    // which detect stack overflow via guard pages and then throw exceptions.
    if (auto CS = CallSite(cursor)) {
      if (doesNotRequireEntrySafepointBefore(CS))
        continue;
      break;
    }
  }

  assert((hasNextInstruction(cursor) || cursor->isTerminator()) &&
         "either we stopped because of a call, or because of terminator");

  return cursor;
}

/// Identify the list of call sites which need to have parseable state
static void findCallSafepoints(Function &F,
                               std::vector<CallSite> &Found /*rval*/) {
  assert(Found.empty() && "must be empty!");
  for (Instruction &I : inst_range(F)) {
    Instruction *inst = &I;
    if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) {
      CallSite CS(inst);

      // No safepoint needed or wanted
      if (!needsStatepoint(CS)) {
        continue;
      }

      Found.push_back(CS);
    }
  }
}

/// Implement a unique function which doesn't require we sort the input
/// vector. Doing so has the effect of changing the output of a couple of
/// tests in ways which make them less useful in testing fused safepoints.
template <typename T> static void unique_unsorted(std::vector<T> &vec) {
  std::set<T> seen;
  std::vector<T> tmp;
  vec.reserve(vec.size());
  std::swap(tmp, vec);
  for (auto V : tmp) {
    if (seen.insert(V).second) {
      vec.push_back(V);
    }
  }
}

static const char *const GCSafepointPollName = "gc.safepoint_poll";

static bool isGCSafepointPoll(Function &F) {
  return F.getName().equals(GCSafepointPollName);
}

/// Returns true if this function should be rewritten to include safepoint
/// polls and parseable call sites. The main point of this function is to be
/// an extension point for custom logic.
static bool shouldRewriteFunction(Function &F) {
  // TODO: This should check the GCStrategy
  if (F.hasGC()) {
    const char *FunctionGCName = F.getGC();
    const StringRef StatepointExampleName("statepoint-example");
    const StringRef CoreCLRName("coreclr");
    return (StatepointExampleName == FunctionGCName) ||
           (CoreCLRName == FunctionGCName);
  } else
    return false;
}
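
// As a hypothetical illustration (not code from this file), a frontend opts a
// function into this rewriting by tagging it with one of the GC strategy
// names checked above:
//   define void @foo() gc "statepoint-example" { ... }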
519
520// TODO: These should become properties of the GCStrategy, possibly with
521// command line overrides.
522static bool enableEntrySafepoints(Function &F) { return !NoEntry; }
523static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; }
524static bool enableCallSafepoints(Function &F) { return !NoCall; }
525
// Normalize the basic block to make it ready to be the target of an invoke
// statepoint. Ensure that 'BB' does not have phi nodes; this may require
// splitting it.
static BasicBlock *normalizeForInvokeSafepoint(BasicBlock *BB,
                                               BasicBlock *InvokeParent) {
  BasicBlock *ret = BB;

  if (!BB->getUniquePredecessor()) {
    ret = SplitBlockPredecessors(BB, InvokeParent, "");
  }

  // Now that 'ret' has a unique predecessor we can safely remove all phi nodes
  // from it
  FoldSingleEntryPHINodes(ret);
  assert(!isa<PHINode>(ret->begin()));

  return ret;
}

bool PlaceSafepoints::runOnFunction(Function &F) {
  if (F.isDeclaration() || F.empty()) {
    // This is a declaration, nothing to do. Must exit early to avoid a crash
    // in dom tree calculation.
    return false;
  }

  if (isGCSafepointPoll(F)) {
    // Given we're inlining this inside of safepoint poll insertion, this
    // doesn't make any sense. Note that we do make any contained calls
    // parseable after we inline a poll.
    return false;
  }

  if (!shouldRewriteFunction(F))
    return false;

  bool modified = false;

  // In various bits below, we rely on the fact that uses are reachable from
  // defs. When there are basic blocks unreachable from the entry, dominance
  // and reachability queries return nonsensical results. Thus, we preprocess
  // the function to ensure these properties hold.
  modified |= removeUnreachableBlocks(F);

  // STEP 1 - Insert the safepoint polling locations. We do not need to
  // actually insert parse points yet. That will be done for all polls and
  // calls in a single pass.

  DominatorTree DT;
  DT.recalculate(F);

  SmallVector<Instruction *, 16> PollsNeeded;
  std::vector<CallSite> ParsePointNeeded;

  if (enableBackedgeSafepoints(F)) {
    // Construct a pass manager to run the LoopPass backedge logic. We
    // need the pass manager to handle scheduling all the loop passes
    // appropriately. Doing this by hand is painful and just not worth messing
    // with for the moment.
    legacy::FunctionPassManager FPM(F.getParent());
    bool CanAssumeCallSafepoints = enableCallSafepoints(F);
    PlaceBackedgeSafepointsImpl *PBS =
        new PlaceBackedgeSafepointsImpl(CanAssumeCallSafepoints);
    FPM.add(PBS);
    FPM.run(F);

    // We preserve dominance information when inserting the poll, otherwise
    // we'd have to recalculate this on every insert.
    DT.recalculate(F);

    auto &PollLocations = PBS->PollLocations;

    auto OrderByBBName = [](Instruction *a, Instruction *b) {
      return a->getParent()->getName() < b->getParent()->getName();
    };
    // We need the order of the list to be stable so that naming ends up stable
    // when we split edges. This makes test cases much easier to write.
    std::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);

    // We can sometimes end up with duplicate poll locations. This happens if
    // a single loop is visited more than once. The fact this happens seems
    // wrong, but it does happen for the split-backedge.ll test case.
    PollLocations.erase(std::unique(PollLocations.begin(),
                                    PollLocations.end()),
                        PollLocations.end());

    // Insert a poll at each point the analysis pass identified. The poll
    // location must be the terminator of a loop latch block.
    for (TerminatorInst *Term : PollLocations) {
      // We are inserting a poll, the function is modified.
      modified = true;

      if (SplitBackedge) {
        // Split the backedge of the loop and insert the poll within that new
        // basic block. This creates a loop with two latches per original
        // latch (which is non-ideal), but this appears to be easier to
        // optimize in practice than inserting the poll immediately before the
        // latch test.

        // Since this is a latch, at least one of the successors must dominate
        // it. It's possible that we have a) duplicate edges to the same header
        // and b) edges to distinct loop headers. We need to insert polls on
        // each.
        SetVector<BasicBlock *> Headers;
        for (unsigned i = 0; i < Term->getNumSuccessors(); i++) {
          BasicBlock *Succ = Term->getSuccessor(i);
          if (DT.dominates(Succ, Term->getParent())) {
            Headers.insert(Succ);
          }
        }
        assert(!Headers.empty() && "poll location is not a loop latch?");

        // The split loop structure here is so that we only need to recalculate
        // the dominator tree once. Alternatively, we could just keep it up to
        // date and use a more natural merged loop.
        SetVector<BasicBlock *> SplitBackedges;
        for (BasicBlock *Header : Headers) {
          BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, &DT);
          PollsNeeded.push_back(NewBB->getTerminator());
          NumBackedgeSafepoints++;
        }
      } else {
        // Split the latch block itself, right before the terminator.
        PollsNeeded.push_back(Term);
        NumBackedgeSafepoints++;
      }
    }
  }

  if (enableEntrySafepoints(F)) {
    Instruction *Location = findLocationForEntrySafepoint(F, DT);
    if (!Location) {
      // policy choice not to insert?
    } else {
      PollsNeeded.push_back(Location);
      modified = true;
      NumEntrySafepoints++;
    }
  }

  // Now that we've identified all the needed safepoint poll locations, insert
  // the safepoint polls themselves.
  for (Instruction *PollLocation : PollsNeeded) {
    std::vector<CallSite> RuntimeCalls;
    InsertSafepointPoll(PollLocation, RuntimeCalls);
    ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(),
                            RuntimeCalls.end());
  }
  PollsNeeded.clear(); // make sure we don't accidentally use these again

  // The dominator tree has been invalidated by the inlining performed in the
  // above loop. TODO: Teach the inliner how to update the dom tree?
  DT.recalculate(F);

  if (enableCallSafepoints(F)) {
    std::vector<CallSite> Calls;
    findCallSafepoints(F, Calls);
    NumCallSafepoints += Calls.size();
    ParsePointNeeded.insert(ParsePointNeeded.end(), Calls.begin(), Calls.end());
  }

  // Unique the vectors since we can end up with duplicates if we scan the call
  // site for call safepoints after we add it for entry or backedge. The
  // only reason we need tracking at all is that some functions might have
  // polls but not call safepoints and thus we might miss marking the runtime
  // calls for the polls. (This is useful in test cases!)
  unique_unsorted(ParsePointNeeded);

  // Any parse point (no matter what source) will be handled here.

  // We're about to start modifying the function.
  if (!ParsePointNeeded.empty())
    modified = true;

  // Now run through and insert the safepoints, but do _NOT_ update or remove
  // any existing uses. We have references to live variables that need to
  // survive to the last iteration of this loop.
  std::vector<Value *> Results;
  Results.reserve(ParsePointNeeded.size());
  for (size_t i = 0; i < ParsePointNeeded.size(); i++) {
    CallSite &CS = ParsePointNeeded[i];

    // For invoke statepoints we need to remove all phi nodes at the normal
    // destination block. The reason is that we can place the gc_result only
    // after the last phi node in the basic block, and we would get malformed
    // code after RAUW for the gc_result if one of those phi nodes used the
    // result of the invoke.
    if (InvokeInst *Invoke = dyn_cast<InvokeInst>(CS.getInstruction())) {
      normalizeForInvokeSafepoint(Invoke->getNormalDest(),
                                  Invoke->getParent());
    }

    Value *GCResult = ReplaceWithStatepoint(CS, nullptr);
    Results.push_back(GCResult);
  }
  assert(Results.size() == ParsePointNeeded.size());

  // Adjust all users of the old call sites to use the new ones instead.
  for (size_t i = 0; i < ParsePointNeeded.size(); i++) {
    CallSite &CS = ParsePointNeeded[i];
    Value *GCResult = Results[i];
    if (GCResult) {
      // Cannot RAUW for the invoke gc_result if phi nodes are present.
      assert(CS.isCall() ||
             !isa<PHINode>(cast<Instruction>(GCResult)->getParent()->begin()));

      // Replace all uses with the new call.
      CS.getInstruction()->replaceAllUsesWith(GCResult);
    }

    // Now that we've handled all uses, remove the original call itself.
    // Note: The insert point can't be the deleted instruction!
    CS.getInstruction()->eraseFromParent();
  }
  return modified;
}

char PlaceBackedgeSafepointsImpl::ID = 0;
char PlaceSafepoints::ID = 0;

FunctionPass *llvm::createPlaceSafepointsPass() {
  return new PlaceSafepoints();
}

INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl,
                      "place-backedge-safepoints-impl",
                      "Place Backedge Safepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl,
                    "place-backedge-safepoints-impl",
                    "Place Backedge Safepoints", false, false)

INITIALIZE_PASS_BEGIN(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                      false, false)
INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                    false, false)

static bool isGCLeafFunction(const CallSite &CS) {
  Instruction *inst = CS.getInstruction();
  if (isa<IntrinsicInst>(inst)) {
    // Most LLVM intrinsics are things which can never take a safepoint.
    // As a result, we don't need to have the stack parsable at the
    // callsite. This is a highly useful optimization since intrinsic
    // calls are fairly prevalent, particularly in debug builds.
    return true;
  }

  // If this function is marked explicitly as a leaf call, we don't need to
  // place a safepoint for it. In fact, for correctness we *can't* in many
  // cases. Note: Indirect calls return null for the called function, and
  // these obviously aren't runtime functions with attributes.
  // TODO: Support attributes on the call site as well.
  const Function *F = CS.getCalledFunction();
  bool isLeaf =
      F &&
      F->getFnAttribute("gc-leaf-function").getValueAsString().equals("true");
  if (isLeaf) {
    return true;
  }
  return false;
}
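
// As a hypothetical illustration (not code from this file), a callee known to
// never reach a safepoint can be marked in IR as:
//   define void @fast_helper() "gc-leaf-function"="true" { ... }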

static void
InsertSafepointPoll(Instruction *InsertBefore,
                    std::vector<CallSite> &ParsePointsNeeded /*rval*/) {
  BasicBlock *OrigBB = InsertBefore->getParent();
  Module *M = InsertBefore->getModule();
  assert(M && "must be part of a module");

  // Inline the safepoint poll implementation - this will get all the branch,
  // control flow, etc. Most importantly, it will introduce the actual slow
  // path call - where we need to insert a safepoint (parsepoint).

  auto *F = M->getFunction(GCSafepointPollName);
  assert(F->getType()->getElementType() ==
             FunctionType::get(Type::getVoidTy(M->getContext()), false) &&
         "gc.safepoint_poll declared with wrong type");
  assert(!F->empty() && "gc.safepoint_poll must be a non-empty function");
  CallInst *PollCall = CallInst::Create(F, "", InsertBefore);

  // Record some information about the call site we're replacing
  BasicBlock::iterator before(PollCall), after(PollCall);
  bool isBegin(false);
  if (before == OrigBB->begin()) {
    isBegin = true;
  } else {
    before--;
  }
  after++;
  assert(after != OrigBB->end() && "must have successor");

  // do the actual inlining
  InlineFunctionInfo IFI;
  bool InlineStatus = InlineFunction(PollCall, IFI);
  assert(InlineStatus && "inline must succeed");
  (void)InlineStatus; // suppress warning in release-asserts

  // Check post conditions
  assert(IFI.StaticAllocas.empty() && "can't have allocs");

  std::vector<CallInst *> calls; // new calls
  std::set<BasicBlock *> BBs;    // new BBs + insertee
  // Include only the newly inserted instructions. Note: begin may not be valid
  // if we inserted to the beginning of the basic block.
  BasicBlock::iterator start;
  if (isBegin) {
    start = OrigBB->begin();
  } else {
    start = before;
    start++;
  }

  // If your poll function includes an unreachable at the end, that's not
  // valid. Bugpoint likes to create this, so check for it.
  assert(isPotentiallyReachable(&*start, &*after, nullptr, nullptr) &&
         "malformed poll function");

  scanInlinedCode(&*(start), &*(after), calls, BBs);
  assert(!calls.empty() && "slow path not found for safepoint poll");

  // Record the fact we need a parsable state at the runtime call contained in
  // the poll function. This is required so that the runtime knows how to
  // parse the last frame when we actually take the safepoint (i.e. execute
  // the slow path).
  assert(ParsePointsNeeded.empty());
  for (size_t i = 0; i < calls.size(); i++) {

    // No safepoint needed or wanted
    if (!needsStatepoint(calls[i])) {
      continue;
    }

    // These are likely runtime calls. Should we assert that via calling
    // convention or something?
    ParsePointsNeeded.push_back(CallSite(calls[i]));
  }
  assert(ParsePointsNeeded.size() <= calls.size());
}

/// Replaces the given call site (Call or Invoke) with a gc.statepoint
/// intrinsic with an empty deoptimization arguments list. This does
/// NOT do explicit relocation for GC support.
static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */
                                    Pass *P) {
  assert(CS.getInstruction()->getParent()->getParent()->getParent() &&
         "must be set");

  // TODO: technically, a pass is not allowed to get functions from within a
  // function pass since it might trigger a new function addition. Refactor
  // this logic out to the initialization of the pass. Doesn't appear to
  // matter in practice.

  // Then go ahead and use the builder to actually do the inserts. We insert
  // immediately before the previous instruction under the assumption that all
  // arguments will be available here. We can't insert afterwards since we may
  // be replacing a terminator.
  IRBuilder<> Builder(CS.getInstruction());

  // Note: The gc args are not filled in at this time, that's handled by
  // RewriteStatepointsForGC (which is currently under review).

  // Create the statepoint given all the arguments
  Instruction *Token = nullptr;

  uint64_t ID;
  uint32_t NumPatchBytes;

  AttributeSet OriginalAttrs = CS.getAttributes();
  Attribute AttrID =
      OriginalAttrs.getAttribute(AttributeSet::FunctionIndex, "statepoint-id");
  Attribute AttrNumPatchBytes = OriginalAttrs.getAttribute(
      AttributeSet::FunctionIndex, "statepoint-num-patch-bytes");

  AttrBuilder AttrsToRemove;
  bool HasID = AttrID.isStringAttribute() &&
               !AttrID.getValueAsString().getAsInteger(10, ID);

  if (HasID)
    AttrsToRemove.addAttribute("statepoint-id");
  else
    ID = 0xABCDEF00;

  bool HasNumPatchBytes =
      AttrNumPatchBytes.isStringAttribute() &&
      !AttrNumPatchBytes.getValueAsString().getAsInteger(10, NumPatchBytes);

  if (HasNumPatchBytes)
    AttrsToRemove.addAttribute("statepoint-num-patch-bytes");
  else
    NumPatchBytes = 0;

  OriginalAttrs = OriginalAttrs.removeAttributes(
      CS.getInstruction()->getContext(), AttributeSet::FunctionIndex,
      AttrsToRemove);

  Value *StatepointTarget = NumPatchBytes == 0
                                ? CS.getCalledValue()
                                : ConstantPointerNull::get(cast<PointerType>(
                                      CS.getCalledValue()->getType()));

  if (CS.isCall()) {
    CallInst *ToReplace = cast<CallInst>(CS.getInstruction());
    CallInst *Call = Builder.CreateGCStatepointCall(
        ID, NumPatchBytes, StatepointTarget,
        makeArrayRef(CS.arg_begin(), CS.arg_end()), None, None,
        "safepoint_token");
    Call->setTailCall(ToReplace->isTailCall());
    Call->setCallingConv(ToReplace->getCallingConv());

    // In case we can handle this set of attributes - set up the function
    // attributes directly on the statepoint and return attributes later for
    // the gc_result intrinsic.
    Call->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Call;

    // Put the following gc_result and gc_relocate calls immediately after the
    // old call (which we're about to delete).
    assert(ToReplace->getNextNode() && "not a terminator, must have next");
    Builder.SetInsertPoint(ToReplace->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getNextNode()->getDebugLoc());
  } else if (CS.isInvoke()) {
    InvokeInst *ToReplace = cast<InvokeInst>(CS.getInstruction());

    // Insert the new invoke into the old block. We'll remove the old one in a
    // moment at which point this will become the new terminator for the
    // original block.
    Builder.SetInsertPoint(ToReplace->getParent());
    InvokeInst *Invoke = Builder.CreateGCStatepointInvoke(
        ID, NumPatchBytes, StatepointTarget, ToReplace->getNormalDest(),
        ToReplace->getUnwindDest(), makeArrayRef(CS.arg_begin(), CS.arg_end()),
        None, None, "safepoint_token");

    Invoke->setCallingConv(ToReplace->getCallingConv());

    // In case we can handle this set of attributes - set up the function
    // attributes directly on the statepoint and return attributes later for
    // the gc_result intrinsic.
    Invoke->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Invoke;

    // We'll insert the gc.result into the normal block.
    BasicBlock *NormalDest = ToReplace->getNormalDest();
    // We cannot insert a gc.result if phi nodes are present; those cases
    // should have been removed prior to running this function.
    assert(!isa<PHINode>(NormalDest->begin()));
    Instruction *IP = &*(NormalDest->getFirstInsertionPt());
    Builder.SetInsertPoint(IP);
  } else {
    llvm_unreachable("unexpected type of CallSite");
  }
  assert(Token);

  // Handle the return value of the original call - update all uses to use a
  // gc_result hanging off the statepoint node we just inserted.

  // Only add the gc_result iff there is actually a used result
  if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) {
    std::string TakenName =
        CS.getInstruction()->hasName() ? CS.getInstruction()->getName() : "";
    CallInst *GCResult = Builder.CreateGCResult(Token, CS.getType(), TakenName);
    GCResult->setAttributes(OriginalAttrs.getRetAttributes());
    return GCResult;
  } else {
    // No return value for the call.
    return nullptr;
  }
}