//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset gained by stepping over one element of the
    // array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index of the requested
      // element and recurse into it with the rest of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}

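/// computeValueLLTs - Like ComputeValueVTs, but computes a sequence of
/// GlobalISel LLTs for the individual non-aggregate leaf types that make up
/// Ty. If Offsets is non-null, the in-memory offsets of the leaf values are
/// recorded in bits.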
void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

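/// getFCmpCodeWithoutNaN - Given an ISD floating-point condition code, return
/// the equivalent code that ignores the ordered/unordered (NaN) distinction;
/// condition codes without such an equivalent are returned unchanged.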
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

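/// Return true if a bitcast between T1 and T2 would be a no-op for the
/// purposes of this analysis: the types are identical, both are pointers, or
/// both are vector types that are legal for the target.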
static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (auto CS = ImmutableCallSite(I)) {
      // Look through a call that simply hands back one of its arguments via
      // the "returned" attribute, as long as the types agree modulo a no-op
      // bitcast.
      const Value *ReturnedOp = CS.getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // is a noop.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      // A lifetime end intrinsic should not stop tail call optimization.
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
        if (II->getIntrinsicID() == Intrinsic::lifetime_end)
          continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(&*BBI))
        return false;
    }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}

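/// Test whether the return-value attributes of the caller and of the call
/// instruction I are compatible with tail-calling. If AllowDifferingSizes is
/// non-null, it is set to false when a sign- or zero-extension attribute
/// requires the returned value to have exactly the same size in both places.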
bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeList::ReturnIndex);

  // NoAlias and NonNull are completely benign as far as calling convention
  // goes, they shouldn't affect whether the call is a tail call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);
  CallerAttrs.removeAttribute(Attribute::NonNull);
  CalleeAttrs.removeAttribute(Attribute::NonNull);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

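/// Determine whether the value produced by the tail-call candidate I can
/// legally be the value delivered to this "ret": the return must be void or
/// undef, or every scalar slot of the returned value must be traceable back to
/// the call through operations that only discard bits (see
/// slotOnlyDiscardsData), with a special case for mem intrinsics whose libcall
/// expansion returns its first argument.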
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // An intrinsic like llvm.memcpy has no return value, but the expanded
  // libcall may or may not have one. On most platforms, it will be expanded
  // as memcpy in libc, which returns the first argument. On other platforms
  // like arm-none-eabi, memcpy may be expanded as a library call without a
  // return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        RetVal == Call->getArgOperand(0))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

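/// Flood-fill EHScopeMembership starting from MBB, assigning the given EHScope
/// number to every block reached. The walk does not enter other EH scopes
/// (EH pads other than MBB itself) and does not continue past blocks that
/// return from the scope.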
static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    for (const MachineBasicBlock *Succ : Visiting->successors())
      Worklist.push_back(Succ);
  }
}

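/// Partition the blocks of MF into EH scopes. Each block is mapped to the
/// number of the machine basic block at the head of its scope; blocks
/// belonging to the parent function are mapped to the entry block's number.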
DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH scope entry blocks.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}