//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope.  This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
//
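// For illustration only (a hypothetical example, not code from this file):
//
//   void f() {
//     std::string s;   // pushes a normal+EH cleanup for ~std::string()
//     may_throw();     // if this throws, only the EH path runs the cleanup
//   }                  // on ordinary fall-through, the normal path runs it
//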
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CodeGenFunction.h"

using namespace clang;
using namespace CodeGen;

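/// Answer whether the given r-value can be used as-is at a point its
/// definition does not dominate: scalars and aggregate addresses defer to
/// DominatingLLVMValue; complex values always need to be saved.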
bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  if (rv.isScalar())
    return DominatingLLVMValue::needsSaving(rv.getScalarVal());
  if (rv.isAggregate())
    return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
  return true;
}

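/// Copy the given r-value into a temporary alloca if necessary so that it
/// can be reloaded (via restore) at a point the current IP may not dominate.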
DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr, ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
      llvm::StructType::get(V.first->getType(), V.second->getType(),
                            (void*) nullptr);
    llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    CGF.Builder.CreateStore(V.first,
                            CGF.Builder.CreateStructGEP(ComplexTy, addr, 0));
    CGF.Builder.CreateStore(V.second,
                            CGF.Builder.CreateStructGEP(ComplexTy, addr, 1));
    return saved_type(addr, ComplexAddress);
  }

  assert(rv.isAggregate());
  llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V))
    return saved_type(V, AggregateLiteral);

  llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, addr);
  return saved_type(addr, AggregateAddress);
}

/// Given a saved r-value produced by SaveRValue, emit the code
/// necessary to restore it to usability at the current insertion
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
  switch (K) {
  case ScalarLiteral:
    return RValue::get(Value);
  case ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(Value));
  case AggregateLiteral:
    return RValue::getAggregate(Value);
  case AggregateAddress:
    return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
  case ComplexAddress: {
    llvm::Value *real =
      CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 0));
    llvm::Value *imag =
      CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 1));
    return RValue::getComplex(real, imag);
  }
  }

  llvm_unreachable("bad saved r-value kind");
}

/// Push an entry of the given size onto this protected-scope stack.
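/// The stack grows from the end of its buffer toward the beginning, so the
/// innermost (most recently pushed) scope always starts at StartOfData.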
char *EHScopeStack::allocate(size_t Size) {
  Size = llvm::RoundUpToAlignment(Size, ScopeStackAlignment);
  if (!StartOfBuffer) {
    unsigned Capacity = 1024;
    while (Capacity < Size) Capacity *= 2;
    StartOfBuffer = new char[Capacity];
    StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
  } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
    unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
    unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);

    unsigned NewCapacity = CurrentCapacity;
    do {
      NewCapacity *= 2;
    } while (NewCapacity < UsedCapacity + Size);

    char *NewStartOfBuffer = new char[NewCapacity];
    char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
    char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
    memcpy(NewStartOfData, StartOfData, UsedCapacity);
    delete [] StartOfBuffer;
    StartOfBuffer = NewStartOfBuffer;
    EndOfBuffer = NewEndOfBuffer;
    StartOfData = NewStartOfData;
  }

  assert(StartOfBuffer + Size <= StartOfData);
  StartOfData -= Size;
  return StartOfData;
}

void EHScopeStack::deallocate(size_t Size) {
  StartOfData += llvm::RoundUpToAlignment(Size, ScopeStackAlignment);
}

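/// Returns true if every scope between the innermost scope and the given
/// savepoint is a cleanup whose only purpose is to emit lifetime markers.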
bool EHScopeStack::containsOnlyLifetimeMarkers(
    EHScopeStack::stable_iterator Old) const {
  for (EHScopeStack::iterator it = begin(); stabilize(it) != Old; it++) {
    EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*it);
    if (!cleanup || !cleanup->isLifetimeMarker())
      return false;
  }

  return true;
}

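/// Returns the innermost normal cleanup that is still active, walking the
/// chain of enclosing normal cleanups past any that have been deactivated.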
EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
       si != se; ) {
    EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive()) return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}

EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const {
  for (stable_iterator si = getInnermostEHScope(), se = stable_end();
       si != se; ) {
    // Skip over inactive cleanups.
    EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si));
    if (cleanup && !cleanup->isActive()) {
      si = cleanup->getEnclosingEHScope();
      continue;
    }

    // All other scopes are always active.
    return si;
  }

  return stable_end();
}


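/// Allocate an EHCleanupScope of the given kind on the stack and return a
/// pointer to its trailing buffer, into which the caller constructs the
/// actual Cleanup object.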
void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
  char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
  bool IsNormalCleanup = Kind & NormalCleanup;
  bool IsEHCleanup = Kind & EHCleanup;
  bool IsActive = !(Kind & InactiveCleanup);
  EHCleanupScope *Scope =
    new (Buffer) EHCleanupScope(IsNormalCleanup,
                                IsEHCleanup,
                                IsActive,
                                Size,
                                BranchFixups.size(),
                                InnermostNormalCleanup,
                                InnermostEHScope);
  if (IsNormalCleanup)
    InnermostNormalCleanup = stable_begin();
  if (IsEHCleanup)
    InnermostEHScope = stable_begin();

  return Scope->getCleanupBuffer();
}

void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when not empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
  InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
  InnermostEHScope = Cleanup.getEnclosingEHScope();
  deallocate(Cleanup.getAllocatedSize());

  // Destroy the cleanup.
  Cleanup.Destroy();

  // Check whether we can shrink the branch-fixups stack.
  if (!BranchFixups.empty()) {
    // If we no longer have any normal cleanups, all the fixups are
    // complete.
    if (!hasNormalCleanups())
      BranchFixups.clear();

    // Otherwise we can still trim out unnecessary nulls.
    else
      popNullFixups();
  }
}

EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
  assert(getInnermostEHScope() == stable_end());
  char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
  EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
  InnermostEHScope = stable_begin();
  return filter;
}

void EHScopeStack::popFilter() {
  assert(!empty() && "popping exception stack when not empty");

  EHFilterScope &filter = cast<EHFilterScope>(*begin());
  deallocate(EHFilterScope::getSizeForNumFilters(filter.getNumFilters()));

  InnermostEHScope = filter.getEnclosingEHScope();
}

EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
  EHCatchScope *scope =
    new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
  InnermostEHScope = stable_begin();
  return scope;
}

void EHScopeStack::pushTerminate() {
  char *Buffer = allocate(EHTerminateScope::getSize());
  new (Buffer) EHTerminateScope(InnermostEHScope);
  InnermostEHScope = stable_begin();
}

/// Remove any 'null' fixups on the stack.  However, we can't pop more
/// fixups than the fixup depth on the innermost normal cleanup, or
/// else fixups that we try to add to that cleanup will end up in the
/// wrong place.  We *could* try to shrink fixup depths, but that's
/// actually a lot of work for little benefit.
void EHScopeStack::popNullFixups() {
  // We expect this to only be called when there's still an innermost
  // normal cleanup; otherwise there really shouldn't be any fixups.
  assert(hasNormalCleanups());

  EHScopeStack::iterator it = find(InnermostNormalCleanup);
  unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
  assert(BranchFixups.size() >= MinSize && "fixup stack out of order");

  while (BranchFixups.size() > MinSize &&
         BranchFixups.back().Destination == nullptr)
    BranchFixups.pop_back();
}

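/// Rig the cleanup on top of EHStack with an i1 "active" flag so that a
/// cleanup pushed during a conditionally-evaluated subexpression only
/// fires if that subexpression was actually evaluated.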
void CodeGenFunction::initFullExprCleanup() {
  // Create a variable to decide whether the cleanup needs to be run.
  llvm::AllocaInst *active
    = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");

  // Initialize it to false at a site that's guaranteed to be run
  // before each evaluation.
  setBeforeOutermostConditional(Builder.getFalse(), active);

  // Initialize it to true at the current location.
  Builder.CreateStore(Builder.getTrue(), active);

  // Set that as the active flag in the cleanup.
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  assert(!cleanup.getActiveFlag() && "cleanup already has active flag?");
  cleanup.setActiveFlag(active);

  if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
  if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}

void EHScopeStack::Cleanup::anchor() {}

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set.
    BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
    if (Fixup.Destination == nullptr) continue;

    // If there isn't an OptimisticBranchBlock, then InitialBranch is
    // still pointing directly to its destination; forward it to the
    // appropriate cleanup entry.  This is required in the specific
    // case of
    //   { std::string s; goto lbl; }
    //   lbl:
    // i.e. where there's an unresolved fixup inside a single cleanup
    // entry which we're currently popping.
    if (Fixup.OptimisticBranchBlock == nullptr) {
      new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
                          CGF.getNormalCleanupDestSlot(),
                          Fixup.InitialBranch);
      Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
    }

    // Don't add this case to the switch statement twice.
    if (!CasesAdded.insert(Fixup.Destination).second)
      continue;

    Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  CGF.EHStack.clearFixups();
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
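///
/// The resulting IR has roughly this shape (illustrative only; the default
/// label is the branch's original target):
///   %tmp = load i32* %cleanup.dest.slot
///   switch i32 %tmp, label %original.target [ ...cases added by callers ]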
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::TerminatorInst *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    llvm::LoadInst *Load =
      new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = nullptr;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, LatestBranch is
    // already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB).second)
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
      Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }
}

/// Pops cleanup blocks until the given savepoint is reached, then adds the
/// cleanups from the given savepoint in the lifetime-extended cleanups stack.
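///
/// Entries on the lifetime-extended stack are laid out as a header followed
/// by the cleanup's own bytes, i.e. (illustrative):
///   [Header][cleanup payload][Header][cleanup payload]...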
void
CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
                                  size_t OldLifetimeExtendedSize) {
  PopCleanupBlocks(Old);

  // Move our deferred cleanups onto the EH stack.
  for (size_t I = OldLifetimeExtendedSize,
              E = LifetimeExtendedCleanupStack.size(); I != E; /**/) {
    // Alignment should be guaranteed by the vptrs in the individual cleanups.
    assert((I % llvm::alignOf<LifetimeExtendedCleanupHeader>() == 0) &&
           "misaligned cleanup stack entry");

    LifetimeExtendedCleanupHeader &Header =
        reinterpret_cast<LifetimeExtendedCleanupHeader&>(
            LifetimeExtendedCleanupStack[I]);
    I += sizeof(Header);

    EHStack.pushCopyOfCleanup(Header.getKind(),
                              &LifetimeExtendedCleanupStack[I],
                              Header.getSize());
    I += Header.getSize();
  }
  LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize);
}

static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
///
/// Returns the new block, whatever it is.
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Replace all uses of the entry with the predecessor, in case there
  // are phis in the cleanup.
  Entry->replaceAllUsesWith(Pred);

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

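/// Emit the given cleanup's code.  On the Itanium C++ ABI this happens
/// inside a terminate scope for EH cleanups; if the cleanup has an active
/// flag, the emitted code is guarded by a test of that flag.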
static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        EHScopeStack::Cleanup::Flags flags,
                        llvm::Value *ActiveFlag) {
  // Itanium EH cleanups occur within a terminate scope. Microsoft SEH doesn't
  // have this behavior, and the Microsoft C++ runtime will call terminate for
  // us if the cleanup throws.
  bool PushedTerminate = false;
  if (flags.isForEHCleanup() && !CGF.getTarget().getCXXABI().isMicrosoft()) {
    CGF.EHStack.pushTerminate();
    PushedTerminate = true;
  }

  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = nullptr;
  if (ActiveFlag) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive
      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, flags);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag)
    CGF.EmitBlock(ContBB);

  // Leave the terminate scope.
  if (PushedTerminate)
    CGF.EHStack.popTerminate();
}

static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
                                          llvm::BasicBlock *From,
                                          llvm::BasicBlock *To) {
  // Exit is the exit block of a cleanup, so it always terminates in
  // an unconditional branch or a switch.
  llvm::TerminatorInst *Term = Exit->getTerminator();

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
    Br->setSuccessor(0, To);
  } else {
    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
      if (Switch->getSuccessor(I) == From)
        Switch->setSuccessor(I, To);
  }
}

/// We don't need a normal entry block for the given cleanup.
/// Optimistic fixup branches can cause such a block to come into
/// existence anyway; if so, destroy it.
///
/// The validity of this transformation is very much specific to the
/// exact ways in which we form branches to cleanup entries.
static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
                                         EHCleanupScope &scope) {
  llvm::BasicBlock *entry = scope.getNormalBlock();
  if (!entry) return;

  // Replace all the uses with unreachable.
  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
  for (llvm::BasicBlock::use_iterator
         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
    llvm::Use &use = *i;
    ++i;

    use.set(unreachableBB);

    // The only uses should be fixup switches.
    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
    if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
      // Replace the switch with a branch.
      llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);

      // The switch operand is a load from the cleanup-dest alloca.
      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());

      // Destroy the switch.
      si->eraseFromParent();

      // Destroy the load.
      assert(condition->getOperand(0) == CGF.NormalCleanupDest);
      assert(condition->use_empty());
      condition->eraseFromParent();
    }
  }

  assert(entry->use_empty());
  delete entry;
}

/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  llvm::Value *NormalActiveFlag =
    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : nullptr;
  llvm::Value *EHActiveFlag =
    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : nullptr;

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
  assert(Scope.hasEHBranches() == (EHEntry != nullptr));
  bool RequiresEHCleanup = (EHEntry != nullptr);
  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
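  //   (a branch-after terminates at this cleanup, while a branch-through
  //   must continue on through the enclosing normal cleanup as well)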
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != nullptr && IsActive);

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope.  The
  // rest of IR gen doesn't need to worry about this; it only happens
  // during the execution of PopCleanupBlocks().
  bool HasPrebranchedFallthrough =
    (FallthroughSource && FallthroughSource->getTerminator());

  // If this is a normal cleanup, then having a prebranched
  // fallthrough implies that the fallthrough source unconditionally
  // jumps here.
  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
         (Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we have a prebranched fallthrough into an inactive normal
  // cleanup, rewrite it so that it leads to the appropriate place.
  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
    llvm::BasicBlock *prebranchDest;

    // If the prebranch is semantically branching through the next
    // cleanup, just forward it to the next block, leaving the
    // insertion point in the prebranched block.
    if (FallthroughIsBranchThrough) {
      EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
      prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));

    // Otherwise, we need to make a new block.  If the normal cleanup
    // isn't being used at all, we could actually reuse the normal
    // entry block, but this is simpler, and it avoids conflicts with
    // dead optimistic fixup branches.
    } else {
      prebranchDest = createBasicBlock("forwarded-prebranch");
      EmitBlock(prebranchDest);
    }

    llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
    assert(normalEntry && !normalEntry->use_empty());

    ForwardPrebranchedFallthrough(FallthroughSource,
                                  normalEntry, prebranchDest);
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out.  Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::Cleanup *Fn =
    reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (Scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (Scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  if (!RequiresNormalCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      destroyOptimisticNormalEntry(*this, Scope);
      EHStack.popCleanup();

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I.  Set up the fallthrough edge in.

      CGBuilderTy::InsertPoint savedInactiveFallthroughIP;

      // If there's a fallthrough, we need to store the cleanup
      // destination index.  For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        savedInactiveFallthroughIP = Builder.saveAndClearIP();
      }

      // II.  Emit the entry block.  This implicitly branches to it if
      // we have fallthrough.  All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // III.  Figure out where we're going and build the cleanup
      // epilogue.

      bool HasEnclosingCleanups =
        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = nullptr;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = nullptr;
      SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // Clean up the possibly dead store to the cleanup dest slot.
        llvm::Instruction *NormalCleanupDestSlot =
            cast<llvm::Instruction>(getNormalCleanupDestSlot());
        if (NormalCleanupDestSlot->hasOneUse()) {
          NormalCleanupDestSlot->user_back()->eraseFromParent();
          NormalCleanupDestSlot->eraseFromParent();
          NormalCleanupDest = nullptr;
        }

        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        llvm::LoadInst *Load =
          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
        llvm::SwitchInst *Switch =
          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV.  Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

      // Append the prepared cleanup prologue from above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        NormalExit->getInstList().push_back(InstsToAppend[I]);

      // Optimistically hope that any fixups will continue falling through.
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
                              getNormalCleanupDestSlot(),
                              Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }

      // V.  Set up the fallthrough edge out.

      // Case 1: a fallthrough source exists but doesn't branch to the
      // cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        // Prebranched fallthrough was forwarded earlier.
        // Non-prebranched fallthrough doesn't need to be forwarded.
        // Either way, all we need to do is restore the IP we cleared before.
        assert(!IsActive);
        Builder.restoreIP(savedInactiveFallthroughIP);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI.  Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
        SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
             I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);

    // We only actually emit the cleanup code if the cleanup is either
    // active or was used before it was deactivated.
    if (EHActiveFlag || IsActive) {
      cleanupFlags.setIsForEHCleanup();
      EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
    }

    Builder.CreateBr(getEHDispatchBlock(EHParent));

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}

/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
/// specified destination obviously has no cleanups to run.  'false' is always
/// a conservatively correct answer for this method.
bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
    EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
    return true;

  // Otherwise, we might need some cleanups.
  return false;
}


/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope.  The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator
    TopCleanup = EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = nullptr;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it.  If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}

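/// Returns true if the given cleanup, or any normal cleanup nested within
/// it, has ever required a normal entry block.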
static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
                                  EHScopeStack::stable_iterator C) {
  // If we needed a normal block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostNormalCleanup();
       I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getNormalBlock()) return true;
    I = S.getEnclosingNormalCleanup();
  }

  return false;
}

static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
                              EHScopeStack::stable_iterator cleanup) {
  // If we needed an EH block for any reason, that counts.
  if (EHStack.find(cleanup)->hasEHBranches())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         i = EHStack.getInnermostEHScope(); i != cleanup; ) {
    assert(cleanup.strictlyEncloses(i));

    EHScope &scope = *EHStack.find(i);
    if (scope.hasEHBranches())
      return true;

    i = scope.getEnclosingEHScope();
  }

  return false;
}

enum ForActivation_t {
  ForActivation,
  ForDeactivation
};

/// The given cleanup block is changing activation state.  Configure a
/// cleanup variable if necessary.
///
/// It would be good if we had some way of determining if there were
/// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
                                        EHScopeStack::stable_iterator C,
                                        ForActivation_t kind,
                                        llvm::Instruction *dominatingIP) {
  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));

  // We always need the flag if we're activating the cleanup in a
  // conditional context, because we have to assume that the current
  // location doesn't necessarily dominate the cleanup's code.
  bool isActivatedInConditional =
    (kind == ForActivation && CGF.isInConditionalBranch());

  bool needFlag = false;

  // Calculate whether the cleanup was used:

  //  - as a normal cleanup
  if (Scope.isNormalCleanup() &&
      (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInNormalCleanup();
    needFlag = true;
  }

  //  - as an EH cleanup
  if (Scope.isEHCleanup() &&
      (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInEHCleanup();
    needFlag = true;
  }

  // If it hasn't yet been used as either, we're done.
  if (!needFlag) return;

  llvm::AllocaInst *var = Scope.getActiveFlag();
  if (!var) {
    var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
    Scope.setActiveFlag(var);

    assert(dominatingIP && "no existing variable and no dominating IP!");

    // Initialize to true or false depending on whether it was
    // active up to this point.
    llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);

    // If we're in a conditional block, ignore the dominating IP and
    // use the outermost conditional branch.
    if (CGF.isInConditionalBranch()) {
      CGF.setBeforeOutermostConditional(value, var);
    } else {
      new llvm::StoreInst(value, var, dominatingIP);
    }
  }

  CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}

/// Activate a cleanup that was created in an inactive state.
void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
                                           llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);

  Scope.setActive(true);
}

1138/// Deactive a cleanup that was created in an active state.
void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
                                             llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(Scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it.
  if (C == EHStack.stable_begin()) {
    // If it's a normal cleanup, we need to pretend that the
    // fallthrough is unreachable.
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
    PopCleanupBlock();
    Builder.restoreIP(SavedIP);
    return;
  }

  // Otherwise, follow the general case.
  SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);

  Scope.setActive(false);
}

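/// Lazily create the i32 slot used to record which destination a normal
/// cleanup's exit switch should branch to.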
llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest)
    NormalCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return NormalCleanupDest;
}

/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
                                       QualType TempType,
                                       llvm::Value *Ptr) {
  pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
              /*useEHCleanup*/ true);
}