Update to Clang 3.5a.
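
Notable upstream lib/AST changes visible in this update (summary drawn from
the diff below; not exhaustive):

 - APValue's inline storage is now accessed through Data.buffer and sized by
   DataSize instead of a raw char array indexed as Data and sized by MaxSize.
 - FunctionType accessors are renamed: getResultType() -> getReturnType(),
   getArgTypes() -> getParamTypes(), getNumArgs() -> getNumParams();
   ExtProtoInfo::ConsumedArguments -> ConsumedParameters;
   mergeFunctionArgumentTypes() -> mergeFunctionParameterTypes().
 - DecayedType uniquing is folded into the new AdjustedType machinery
   (AdjustedTypes fold set, ASTContext::getAdjustedType()).
 - The static CreateRecordDecl() helper and hand-rolled TypedefDecl::Create()
   boilerplate are replaced by the new ASTContext::buildImplicitRecord() and
   buildImplicitTypedef() helpers; for example, as taken from the diff below:

       Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
       CFConstantStringTypeDecl = buildImplicitRecord("NSConstantString");

 - Many explicit iterator loops are rewritten as range-based for loops over
   the new range accessors (fields(), redecls(), bases(), vbases(), quals(),
   params(), protocols(), known_extensions(), ...).
 - ASTContext::setExternalSource() now takes an
   IntrusiveRefCntPtr<ExternalASTSource> instead of an
   OwningPtr<ExternalASTSource>; llvm::tie is replaced by std::tie.
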
Change-Id: I8127eb568f674c2e72635b639a3295381fe8af82
diff --git a/lib/AST/APValue.cpp b/lib/AST/APValue.cpp
index 541836b..049518e 100644
--- a/lib/AST/APValue.cpp
+++ b/lib/AST/APValue.cpp
@@ -34,7 +34,7 @@
struct APValue::LV : LVBase {
static const unsigned InlinePathSpace =
- (MaxSize - sizeof(LVBase)) / sizeof(LValuePathEntry);
+ (DataSize - sizeof(LVBase)) / sizeof(LValuePathEntry);
/// Path - The sequence of base classes, fields and array indices to follow to
/// walk from Base to the subobject. When performing GCC-style folding, there
@@ -75,7 +75,7 @@
struct APValue::MemberPointerData : MemberPointerBase {
static const unsigned InlinePathSpace =
- (MaxSize - sizeof(MemberPointerBase)) / sizeof(const CXXRecordDecl*);
+ (DataSize - sizeof(MemberPointerBase)) / sizeof(const CXXRecordDecl*);
typedef const CXXRecordDecl *PathElem;
union {
PathElem Path[InlinePathSpace];
@@ -136,7 +136,7 @@
break;
case Vector:
MakeVector();
- setVector(((const Vec *)(const char *)RHS.Data)->Elts,
+ setVector(((const Vec *)(const char *)RHS.Data.buffer)->Elts,
RHS.getVectorLength());
break;
case ComplexInt:
@@ -188,27 +188,27 @@
void APValue::DestroyDataAndMakeUninit() {
if (Kind == Int)
- ((APSInt*)(char*)Data)->~APSInt();
+ ((APSInt*)(char*)Data.buffer)->~APSInt();
else if (Kind == Float)
- ((APFloat*)(char*)Data)->~APFloat();
+ ((APFloat*)(char*)Data.buffer)->~APFloat();
else if (Kind == Vector)
- ((Vec*)(char*)Data)->~Vec();
+ ((Vec*)(char*)Data.buffer)->~Vec();
else if (Kind == ComplexInt)
- ((ComplexAPSInt*)(char*)Data)->~ComplexAPSInt();
+ ((ComplexAPSInt*)(char*)Data.buffer)->~ComplexAPSInt();
else if (Kind == ComplexFloat)
- ((ComplexAPFloat*)(char*)Data)->~ComplexAPFloat();
+ ((ComplexAPFloat*)(char*)Data.buffer)->~ComplexAPFloat();
else if (Kind == LValue)
- ((LV*)(char*)Data)->~LV();
+ ((LV*)(char*)Data.buffer)->~LV();
else if (Kind == Array)
- ((Arr*)(char*)Data)->~Arr();
+ ((Arr*)(char*)Data.buffer)->~Arr();
else if (Kind == Struct)
- ((StructData*)(char*)Data)->~StructData();
+ ((StructData*)(char*)Data.buffer)->~StructData();
else if (Kind == Union)
- ((UnionData*)(char*)Data)->~UnionData();
+ ((UnionData*)(char*)Data.buffer)->~UnionData();
else if (Kind == MemberPointer)
- ((MemberPointerData*)(char*)Data)->~MemberPointerData();
+ ((MemberPointerData*)(char*)Data.buffer)->~MemberPointerData();
else if (Kind == AddrLabelDiff)
- ((AddrLabelDiffData*)(char*)Data)->~AddrLabelDiffData();
+ ((AddrLabelDiffData*)(char*)Data.buffer)->~AddrLabelDiffData();
Kind = Uninitialized;
}
@@ -239,19 +239,20 @@
"same size.");
return getComplexIntReal().needsCleanup();
case LValue:
- return reinterpret_cast<const LV *>(Data)->hasPathPtr();
+ return reinterpret_cast<const LV *>(Data.buffer)->hasPathPtr();
case MemberPointer:
- return reinterpret_cast<const MemberPointerData *>(Data)->hasPathPtr();
+ return reinterpret_cast<const MemberPointerData *>(Data.buffer)
+ ->hasPathPtr();
}
llvm_unreachable("Unknown APValue kind!");
}
void APValue::swap(APValue &RHS) {
std::swap(Kind, RHS.Kind);
- char TmpData[MaxSize];
- memcpy(TmpData, Data, MaxSize);
- memcpy(Data, RHS.Data, MaxSize);
- memcpy(RHS.Data, TmpData, MaxSize);
+ char TmpData[DataSize];
+ memcpy(TmpData, Data.buffer, DataSize);
+ memcpy(Data.buffer, RHS.Data.buffer, DataSize);
+ memcpy(RHS.Data.buffer, TmpData, DataSize);
}
void APValue::dump() const {
@@ -498,8 +499,7 @@
First = false;
}
}
- for (RecordDecl::field_iterator FI = RD->field_begin();
- FI != RD->field_end(); ++FI) {
+ for (const auto *FI : RD->fields()) {
if (!First)
Out << ", ";
if (FI->isUnnamedBitfield()) continue;
@@ -546,39 +546,39 @@
const APValue::LValueBase APValue::getLValueBase() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getPointer();
+ return ((const LV*)(const void*)Data.buffer)->BaseAndIsOnePastTheEnd.getPointer();
}
bool APValue::isLValueOnePastTheEnd() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getInt();
+ return ((const LV*)(const void*)Data.buffer)->BaseAndIsOnePastTheEnd.getInt();
}
CharUnits &APValue::getLValueOffset() {
assert(isLValue() && "Invalid accessor");
- return ((LV*)(void*)Data)->Offset;
+ return ((LV*)(void*)Data.buffer)->Offset;
}
bool APValue::hasLValuePath() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const char*)Data)->hasPath();
+ return ((const LV*)(const char*)Data.buffer)->hasPath();
}
ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
assert(isLValue() && hasLValuePath() && "Invalid accessor");
- const LV &LVal = *((const LV*)(const char*)Data);
+ const LV &LVal = *((const LV*)(const char*)Data.buffer);
return ArrayRef<LValuePathEntry>(LVal.getPath(), LVal.PathLength);
}
unsigned APValue::getLValueCallIndex() const {
assert(isLValue() && "Invalid accessor");
- return ((const LV*)(const char*)Data)->CallIndex;
+ return ((const LV*)(const char*)Data.buffer)->CallIndex;
}
void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
unsigned CallIndex) {
assert(isLValue() && "Invalid accessor");
- LV &LVal = *((LV*)(char*)Data);
+ LV &LVal = *((LV*)(char*)Data.buffer);
LVal.BaseAndIsOnePastTheEnd.setPointer(B);
LVal.BaseAndIsOnePastTheEnd.setInt(false);
LVal.Offset = O;
@@ -590,7 +590,7 @@
ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
unsigned CallIndex) {
assert(isLValue() && "Invalid accessor");
- LV &LVal = *((LV*)(char*)Data);
+ LV &LVal = *((LV*)(char*)Data.buffer);
LVal.BaseAndIsOnePastTheEnd.setPointer(B);
LVal.BaseAndIsOnePastTheEnd.setInt(IsOnePastTheEnd);
LVal.Offset = O;
@@ -601,39 +601,42 @@
const ValueDecl *APValue::getMemberPointerDecl() const {
assert(isMemberPointer() && "Invalid accessor");
- const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ const MemberPointerData &MPD =
+ *((const MemberPointerData *)(const char *)Data.buffer);
return MPD.MemberAndIsDerivedMember.getPointer();
}
bool APValue::isMemberPointerToDerivedMember() const {
assert(isMemberPointer() && "Invalid accessor");
- const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ const MemberPointerData &MPD =
+ *((const MemberPointerData *)(const char *)Data.buffer);
return MPD.MemberAndIsDerivedMember.getInt();
}
ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
assert(isMemberPointer() && "Invalid accessor");
- const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ const MemberPointerData &MPD =
+ *((const MemberPointerData *)(const char *)Data.buffer);
return ArrayRef<const CXXRecordDecl*>(MPD.getPath(), MPD.PathLength);
}
void APValue::MakeLValue() {
assert(isUninit() && "Bad state change");
- assert(sizeof(LV) <= MaxSize && "LV too big");
- new ((void*)(char*)Data) LV();
+ static_assert(sizeof(LV) <= DataSize, "LV too big");
+ new ((void*)(char*)Data.buffer) LV();
Kind = LValue;
}
void APValue::MakeArray(unsigned InitElts, unsigned Size) {
assert(isUninit() && "Bad state change");
- new ((void*)(char*)Data) Arr(InitElts, Size);
+ new ((void*)(char*)Data.buffer) Arr(InitElts, Size);
Kind = Array;
}
void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
ArrayRef<const CXXRecordDecl*> Path) {
assert(isUninit() && "Bad state change");
- MemberPointerData *MPD = new ((void*)(char*)Data) MemberPointerData;
+ MemberPointerData *MPD = new ((void*)(char*)Data.buffer) MemberPointerData;
Kind = MemberPointer;
MPD->MemberAndIsDerivedMember.setPointer(Member);
MPD->MemberAndIsDerivedMember.setInt(IsDerivedMember);
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index a03cf9e..3ed2a9f 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -29,6 +29,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -62,6 +63,13 @@
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
if (!CommentsLoaded && ExternalSource) {
ExternalSource->ReadComments();
+
+#ifndef NDEBUG
+ ArrayRef<RawComment *> RawComments = Comments.getComments();
+ assert(std::is_sorted(RawComments.begin(), RawComments.end(),
+ BeforeThanCompare<RawComment>(SourceMgr)));
+#endif
+
CommentsLoaded = true;
}
@@ -137,11 +145,23 @@
DeclLoc = D->getLocStart();
else {
DeclLoc = D->getLocation();
- // If location of the typedef name is in a macro, it is because being
- // declared via a macro. Try using declaration's starting location
- // as the "declaration location".
- if (DeclLoc.isMacroID() && isa<TypedefDecl>(D))
- DeclLoc = D->getLocStart();
+ if (DeclLoc.isMacroID()) {
+ if (isa<TypedefDecl>(D)) {
+ // If location of the typedef name is in a macro, it is because being
+ // declared via a macro. Try using declaration's starting location as
+ // the "declaration location".
+ DeclLoc = D->getLocStart();
+ } else if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ // If location of the tag decl is inside a macro, but the spelling of
+ // the tag name comes from a macro argument, it looks like a special
+ // macro like NS_ENUM is being used to define the tag decl. In that
+ // case, adjust the source location to the expansion loc so that we can
+ // attach the comment to the tag decl.
+ if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
+ TD->isCompleteDefinition())
+ DeclLoc = SourceMgr.getExpansionLoc(DeclLoc);
+ }
+ }
}
// If the declaration doesn't map directly to a location in a file, we
@@ -331,11 +351,9 @@
// Search for comments attached to declarations in the redeclaration chain.
const RawComment *RC = NULL;
const Decl *OriginalDeclForRC = NULL;
- for (Decl::redecl_iterator I = D->redecls_begin(),
- E = D->redecls_end();
- I != E; ++I) {
+ for (auto I : D->redecls()) {
llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
- RedeclComments.find(*I);
+ RedeclComments.find(I);
if (Pos != RedeclComments.end()) {
const RawCommentAndCacheFlags &Raw = Pos->second;
if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
@@ -344,16 +362,16 @@
break;
}
} else {
- RC = getRawCommentForDeclNoCache(*I);
- OriginalDeclForRC = *I;
+ RC = getRawCommentForDeclNoCache(I);
+ OriginalDeclForRC = I;
RawCommentAndCacheFlags Raw;
if (RC) {
Raw.setRaw(RC);
Raw.setKind(RawCommentAndCacheFlags::FromDecl);
} else
Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
- Raw.setOriginalDecl(*I);
- RedeclComments[*I] = Raw;
+ Raw.setOriginalDecl(I);
+ RedeclComments[I] = Raw;
if (RC)
break;
}
@@ -371,10 +389,8 @@
Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
Raw.setOriginalDecl(OriginalDeclForRC);
- for (Decl::redecl_iterator I = D->redecls_begin(),
- E = D->redecls_end();
- I != E; ++I) {
- RawCommentAndCacheFlags &R = RedeclComments[*I];
+ for (auto I : D->redecls()) {
+ RawCommentAndCacheFlags &R = RedeclComments[I];
if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
R = Raw;
}
@@ -390,10 +406,7 @@
if (!ID)
return;
// Add redeclared method here.
- for (ObjCInterfaceDecl::known_extensions_iterator
- Ext = ID->known_extensions_begin(),
- ExtEnd = ID->known_extensions_end();
- Ext != ExtEnd; ++Ext) {
+ for (const auto *Ext : ID->known_extensions()) {
if (ObjCMethodDecl *RedeclaredMethod =
Ext->getMethod(ObjCMethod->getSelector(),
ObjCMethod->isInstanceMethod()))
@@ -484,11 +497,10 @@
if (!(RD = RD->getDefinition()))
return NULL;
// Check non-virtual bases.
- for (CXXRecordDecl::base_class_const_iterator I =
- RD->bases_begin(), E = RD->bases_end(); I != E; ++I) {
- if (I->isVirtual() || (I->getAccessSpecifier() != AS_public))
+ for (const auto &I : RD->bases()) {
+ if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
continue;
- QualType Ty = I->getType();
+ QualType Ty = I.getType();
if (Ty.isNull())
continue;
if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
@@ -500,11 +512,10 @@
}
}
// Check virtual bases.
- for (CXXRecordDecl::base_class_const_iterator I =
- RD->vbases_begin(), E = RD->vbases_end(); I != E; ++I) {
- if (I->getAccessSpecifier() != AS_public)
+ for (const auto &I : RD->vbases()) {
+ if (I.getAccessSpecifier() != AS_public)
continue;
- QualType Ty = I->getType();
+ QualType Ty = I.getType();
if (Ty.isNull())
continue;
if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
@@ -667,6 +678,7 @@
switch (T.getCXXABI().getKind()) {
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
+ case TargetCXXABI::iOS64:
return CreateARMCXXABI(*this);
case TargetCXXABI::GenericAArch64: // Same as Itanium at this level
case TargetCXXABI::GenericItanium:
@@ -782,11 +794,7 @@
A != AEnd; ++A)
A->second->~AttrVec();
- for (llvm::DenseMap<const DeclContext *, MangleNumberingContext *>::iterator
- I = MangleNumberingContexts.begin(),
- E = MangleNumberingContexts.end();
- I != E; ++I)
- delete I->second;
+ llvm::DeleteContainerSeconds(MangleNumberingContexts);
}
void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
@@ -794,8 +802,8 @@
}
void
-ASTContext::setExternalSource(OwningPtr<ExternalASTSource> &Source) {
- ExternalSource.reset(Source.take());
+ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
+ ExternalSource = Source;
}
void ASTContext::PrintStats() const {
@@ -849,7 +857,7 @@
<< NumImplicitDestructors
<< " implicit destructors created\n";
- if (ExternalSource.get()) {
+ if (ExternalSource) {
llvm::errs() << "\n";
ExternalSource->PrintStats();
}
@@ -857,45 +865,47 @@
BumpAlloc.PrintStats();
}
+RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
+ RecordDecl::TagKind TK) const {
+ SourceLocation Loc;
+ RecordDecl *NewDecl;
+ if (getLangOpts().CPlusPlus)
+ NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
+ Loc, &Idents.get(Name));
+ else
+ NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
+ &Idents.get(Name));
+ NewDecl->setImplicit();
+ return NewDecl;
+}
+
+TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
+ StringRef Name) const {
+ TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
+ TypedefDecl *NewDecl = TypedefDecl::Create(
+ const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
+ NewDecl->setImplicit();
+ return NewDecl;
+}
+
TypedefDecl *ASTContext::getInt128Decl() const {
- if (!Int128Decl) {
- TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(Int128Ty);
- Int128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
- getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(),
- &Idents.get("__int128_t"),
- TInfo);
- }
-
+ if (!Int128Decl)
+ Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
return Int128Decl;
}
TypedefDecl *ASTContext::getUInt128Decl() const {
- if (!UInt128Decl) {
- TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(UnsignedInt128Ty);
- UInt128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
- getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(),
- &Idents.get("__uint128_t"),
- TInfo);
- }
-
+ if (!UInt128Decl)
+ UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
return UInt128Decl;
}
TypeDecl *ASTContext::getFloat128StubType() const {
assert(LangOpts.CPlusPlus && "should only be called for c++");
- if (!Float128StubDecl) {
- Float128StubDecl = CXXRecordDecl::Create(const_cast<ASTContext &>(*this),
- TTK_Struct,
- getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(),
- &Idents.get("__float128"));
- }
-
+ if (!Float128StubDecl)
+ Float128StubDecl = buildImplicitRecord("__float128");
+
return Float128StubDecl;
}
@@ -1618,7 +1628,7 @@
}
case Type::MemberPointer: {
const MemberPointerType *MPT = cast<MemberPointerType>(T);
- llvm::tie(Width, Align) = ABI->getMemberPointerWidthAndAlign(MPT);
+ std::tie(Width, Align) = ABI->getMemberPointerWidthAndAlign(MPT);
break;
}
case Type::Complex: {
@@ -1632,8 +1642,9 @@
}
case Type::ObjCObject:
return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
+ case Type::Adjusted:
case Type::Decayed:
- return getTypeInfo(cast<DecayedType>(T)->getDecayedType().getTypePtr());
+ return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
case Type::ObjCInterface: {
const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
@@ -1761,13 +1772,18 @@
if (Target->getTriple().getArch() == llvm::Triple::xcore)
return ABIAlign; // Never overalign on XCore.
+ const TypedefType *TT = T->getAs<TypedefType>();
+
// Double and long long should be naturally aligned if possible.
- if (const ComplexType* CT = T->getAs<ComplexType>())
+ if (const ComplexType *CT = T->getAs<ComplexType>())
T = CT->getElementType().getTypePtr();
if (T->isSpecificBuiltinType(BuiltinType::Double) ||
T->isSpecificBuiltinType(BuiltinType::LongLong) ||
T->isSpecificBuiltinType(BuiltinType::ULongLong))
- return std::max(ABIAlign, (unsigned)getTypeSize(T));
+ // Don't increase the alignment if an alignment attribute was specified on a
+ // typedef declaration.
+ if (!TT || !TT->getDecl()->getMaxAlignment())
+ return std::max(ABIAlign, (unsigned)getTypeSize(T));
return ABIAlign;
}
@@ -1796,9 +1812,8 @@
if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
DeepCollectObjCIvars(SuperClass, false, Ivars);
if (!leafClass) {
- for (ObjCInterfaceDecl::ivar_iterator I = OI->ivar_begin(),
- E = OI->ivar_end(); I != E; ++I)
- Ivars.push_back(*I);
+ for (const auto *I : OI->ivars())
+ Ivars.push_back(I);
} else {
ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
@@ -1814,24 +1829,17 @@
if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
// We can use protocol_iterator here instead of
// all_referenced_protocol_iterator since we are walking all categories.
- for (ObjCInterfaceDecl::all_protocol_iterator P = OI->all_referenced_protocol_begin(),
- PE = OI->all_referenced_protocol_end(); P != PE; ++P) {
- ObjCProtocolDecl *Proto = (*P);
+ for (auto *Proto : OI->all_referenced_protocols()) {
Protocols.insert(Proto->getCanonicalDecl());
- for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
- PE = Proto->protocol_end(); P != PE; ++P) {
- Protocols.insert((*P)->getCanonicalDecl());
- CollectInheritedProtocols(*P, Protocols);
+ for (auto *P : Proto->protocols()) {
+ Protocols.insert(P->getCanonicalDecl());
+ CollectInheritedProtocols(P, Protocols);
}
}
// Categories of this Interface.
- for (ObjCInterfaceDecl::visible_categories_iterator
- Cat = OI->visible_categories_begin(),
- CatEnd = OI->visible_categories_end();
- Cat != CatEnd; ++Cat) {
- CollectInheritedProtocols(*Cat, Protocols);
- }
+ for (const auto *Cat : OI->visible_categories())
+ CollectInheritedProtocols(Cat, Protocols);
if (ObjCInterfaceDecl *SD = OI->getSuperClass())
while (SD) {
@@ -1839,22 +1847,16 @@
SD = SD->getSuperClass();
}
} else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
- for (ObjCCategoryDecl::protocol_iterator P = OC->protocol_begin(),
- PE = OC->protocol_end(); P != PE; ++P) {
- ObjCProtocolDecl *Proto = (*P);
+ for (auto *Proto : OC->protocols()) {
Protocols.insert(Proto->getCanonicalDecl());
- for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
- PE = Proto->protocol_end(); P != PE; ++P)
- CollectInheritedProtocols(*P, Protocols);
+ for (const auto *P : Proto->protocols())
+ CollectInheritedProtocols(P, Protocols);
}
} else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
- for (ObjCProtocolDecl::protocol_iterator P = OP->protocol_begin(),
- PE = OP->protocol_end(); P != PE; ++P) {
- ObjCProtocolDecl *Proto = (*P);
+ for (auto *Proto : OP->protocols()) {
Protocols.insert(Proto->getCanonicalDecl());
- for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
- PE = Proto->protocol_end(); P != PE; ++P)
- CollectInheritedProtocols(*P, Protocols);
+ for (const auto *P : Proto->protocols())
+ CollectInheritedProtocols(P, Protocols);
}
}
}
@@ -1862,12 +1864,8 @@
unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
unsigned count = 0;
// Count ivars declared in class extension.
- for (ObjCInterfaceDecl::known_extensions_iterator
- Ext = OI->known_extensions_begin(),
- ExtEnd = OI->known_extensions_end();
- Ext != ExtEnd; ++Ext) {
+ for (const auto *Ext : OI->known_extensions())
count += Ext->ivar_size();
- }
// Count ivar defined in this class's implementation. This
// includes synthesized ivars.
@@ -2080,12 +2078,12 @@
QualType Result;
if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
- Result = getFunctionNoProtoType(FNPT->getResultType(), Info);
+ Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
} else {
const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
EPI.ExtInfo = Info;
- Result = getFunctionType(FPT->getResultType(), FPT->getArgTypes(), EPI);
+ Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
}
return cast<FunctionType>(Result.getTypePtr());
@@ -2097,7 +2095,7 @@
while (true) {
const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- FD->setType(getFunctionType(ResultType, FPT->getArgTypes(), EPI));
+ FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
if (FunctionDecl *Next = FD->getPreviousDecl())
FD = Next;
else
@@ -2163,15 +2161,30 @@
return QualType(New, 0);
}
+QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
+ llvm::FoldingSetNodeID ID;
+ AdjustedType::Profile(ID, Orig, New);
+ void *InsertPos = 0;
+ AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (AT)
+ return QualType(AT, 0);
+
+ QualType Canonical = getCanonicalType(New);
+
+ // Get the new insert position for the node we care about.
+ AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(AT == 0 && "Shouldn't be in the map!");
+
+ AT = new (*this, TypeAlignment)
+ AdjustedType(Type::Adjusted, Orig, New, Canonical);
+ Types.push_back(AT);
+ AdjustedTypes.InsertNode(AT, InsertPos);
+ return QualType(AT, 0);
+}
+
QualType ASTContext::getDecayedType(QualType T) const {
assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
- llvm::FoldingSetNodeID ID;
- DecayedType::Profile(ID, T);
- void *InsertPos = 0;
- if (DecayedType *DT = DecayedTypes.FindNodeOrInsertPos(ID, InsertPos))
- return QualType(DT, 0);
-
QualType Decayed;
// C99 6.7.5.3p7:
@@ -2189,17 +2202,23 @@
if (T->isFunctionType())
Decayed = getPointerType(T);
+ llvm::FoldingSetNodeID ID;
+ AdjustedType::Profile(ID, T, Decayed);
+ void *InsertPos = 0;
+ AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (AT)
+ return QualType(AT, 0);
+
QualType Canonical = getCanonicalType(Decayed);
// Get the new insert position for the node we care about.
- DecayedType *NewIP = DecayedTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(AT == 0 && "Shouldn't be in the map!");
- DecayedType *New =
- new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
- Types.push_back(New);
- DecayedTypes.InsertNode(New, InsertPos);
- return QualType(New, 0);
+ AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
+ Types.push_back(AT);
+ AdjustedTypes.InsertNode(AT, InsertPos);
+ return QualType(AT, 0);
}
/// getBlockPointerType - Return the uniqued reference to the type for
@@ -2798,8 +2817,6 @@
T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}
-/// getFunctionType - Return a normal function type with a typed argument
-/// list. isVariadic indicates whether the argument list includes '...'.
QualType
ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
const FunctionProtoType::ExtProtoInfo &EPI) const {
@@ -2873,7 +2890,7 @@
} else if (EPI.ExceptionSpecType == EST_Unevaluated) {
Size += sizeof(FunctionDecl*);
}
- if (EPI.ConsumedArguments)
+ if (EPI.ConsumedParameters)
Size += NumArgs * sizeof(bool);
FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment);
@@ -3507,6 +3524,56 @@
return QualType(T, 0);
}
+/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
+/// protocol list adopt all protocols in QT's qualified-id protocol
+/// list.
+bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
+ ObjCInterfaceDecl *IC) {
+ if (!QT->isObjCQualifiedIdType())
+ return false;
+
+ if (const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>()) {
+ // If both the right and left sides have qualifiers.
+ for (auto *Proto : OPT->quals()) {
+ if (!IC->ClassImplementsProtocol(Proto, false))
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
+/// QT's qualified-id protocol list adopt all protocols in IDecl's list
+/// of protocols.
+bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
+ ObjCInterfaceDecl *IDecl) {
+ if (!QT->isObjCQualifiedIdType())
+ return false;
+ const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>();
+ if (!OPT)
+ return false;
+ if (!IDecl->hasDefinition())
+ return false;
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
+ CollectInheritedProtocols(IDecl, InheritedProtocols);
+ if (InheritedProtocols.empty())
+ return false;
+
+ for (auto *PI : InheritedProtocols) {
+ // If both the right and left sides have qualifiers.
+ bool Adopts = false;
+ for (auto *Proto : OPT->quals()) {
+ // return 'true' if '*PI' is in the inheritance hierarchy of Proto
+ if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
+ break;
+ }
+ if (!Adopts)
+ return false;
+ }
+ return true;
+}
+
/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
/// the given object type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
@@ -4479,22 +4546,10 @@
return 1;
}
-static RecordDecl *
-CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
- DeclContext *DC, IdentifierInfo *Id) {
- SourceLocation Loc;
- if (Ctx.getLangOpts().CPlusPlus)
- return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
- else
- return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
-}
-
// getCFConstantStringType - Return the type used for constant CFStrings.
QualType ASTContext::getCFConstantStringType() const {
if (!CFConstantStringTypeDecl) {
- CFConstantStringTypeDecl =
- CreateRecordDecl(*this, TTK_Struct, TUDecl,
- &Idents.get("NSConstantString"));
+ CFConstantStringTypeDecl = buildImplicitRecord("NSConstantString");
CFConstantStringTypeDecl->startDefinition();
QualType FieldTypes[4];
@@ -4529,8 +4584,7 @@
QualType ASTContext::getObjCSuperType() const {
if (ObjCSuperType.isNull()) {
- RecordDecl *ObjCSuperTypeDecl =
- CreateRecordDecl(*this, TTK_Struct, TUDecl, &Idents.get("objc_super"));
+ RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
TUDecl->addDecl(ObjCSuperTypeDecl);
ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
}
@@ -4547,12 +4601,11 @@
if (BlockDescriptorType)
return getTagDeclType(BlockDescriptorType);
- RecordDecl *T;
+ RecordDecl *RD;
// FIXME: Needs the FlagAppleBlock bit.
- T = CreateRecordDecl(*this, TTK_Struct, TUDecl,
- &Idents.get("__block_descriptor"));
- T->startDefinition();
-
+ RD = buildImplicitRecord("__block_descriptor");
+ RD->startDefinition();
+
QualType FieldTypes[] = {
UnsignedLongTy,
UnsignedLongTy,
@@ -4564,20 +4617,17 @@
};
for (size_t i = 0; i < 2; ++i) {
- FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
- SourceLocation(),
- &Idents.get(FieldNames[i]),
- FieldTypes[i], /*TInfo=*/0,
- /*BitWidth=*/0,
- /*Mutable=*/false,
- ICIS_NoInit);
+ FieldDecl *Field = FieldDecl::Create(
+ *this, RD, SourceLocation(), SourceLocation(),
+ &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0, /*Mutable=*/false, ICIS_NoInit);
Field->setAccess(AS_public);
- T->addDecl(Field);
+ RD->addDecl(Field);
}
- T->completeDefinition();
+ RD->completeDefinition();
- BlockDescriptorType = T;
+ BlockDescriptorType = RD;
return getTagDeclType(BlockDescriptorType);
}
@@ -4586,12 +4636,11 @@
if (BlockDescriptorExtendedType)
return getTagDeclType(BlockDescriptorExtendedType);
- RecordDecl *T;
+ RecordDecl *RD;
// FIXME: Needs the FlagAppleBlock bit.
- T = CreateRecordDecl(*this, TTK_Struct, TUDecl,
- &Idents.get("__block_descriptor_withcopydispose"));
- T->startDefinition();
-
+ RD = buildImplicitRecord("__block_descriptor_withcopydispose");
+ RD->startDefinition();
+
QualType FieldTypes[] = {
UnsignedLongTy,
UnsignedLongTy,
@@ -4607,21 +4656,18 @@
};
for (size_t i = 0; i < 4; ++i) {
- FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
- SourceLocation(),
- &Idents.get(FieldNames[i]),
- FieldTypes[i], /*TInfo=*/0,
- /*BitWidth=*/0,
- /*Mutable=*/false,
- ICIS_NoInit);
+ FieldDecl *Field = FieldDecl::Create(
+ *this, RD, SourceLocation(), SourceLocation(),
+ &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false, ICIS_NoInit);
Field->setAccess(AS_public);
- T->addDecl(Field);
+ RD->addDecl(Field);
}
- T->completeDefinition();
+ RD->completeDefinition();
- BlockDescriptorExtendedType = T;
-
+ BlockDescriptorExtendedType = RD;
return getTagDeclType(BlockDescriptorExtendedType);
}
@@ -4691,12 +4737,8 @@
TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
if (!ObjCInstanceTypeDecl)
- ObjCInstanceTypeDecl = TypedefDecl::Create(*this,
- getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(),
- &Idents.get("instancetype"),
- getTrivialTypeSourceInfo(getObjCIdType()));
+ ObjCInstanceTypeDecl =
+ buildImplicitTypedef(getObjCIdType(), "instancetype");
return ObjCInstanceTypeDecl;
}
@@ -4742,21 +4784,19 @@
Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
// Encode result type.
if (getLangOpts().EncodeExtendedBlockSig)
- getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None,
- BlockTy->getAs<FunctionType>()->getResultType(),
- S, true /*Extended*/);
+ getObjCEncodingForMethodParameter(
+ Decl::OBJC_TQ_None, BlockTy->getAs<FunctionType>()->getReturnType(), S,
+ true /*Extended*/);
else
- getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(),
- S);
+ getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getReturnType(), S);
// Compute size of all parameters.
// Start with computing size of a pointer in number of bytes.
// FIXME: There might(should) be a better way of doing this computation!
SourceLocation Loc;
CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
CharUnits ParmOffset = PtrSize;
- for (BlockDecl::param_const_iterator PI = Decl->param_begin(),
- E = Decl->param_end(); PI != E; ++PI) {
- QualType PType = (*PI)->getType();
+ for (auto PI : Decl->params()) {
+ QualType PType = PI->getType();
CharUnits sz = getObjCEncodingTypeSize(PType);
if (sz.isZero())
continue;
@@ -4770,9 +4810,7 @@
// Argument types.
ParmOffset = PtrSize;
- for (BlockDecl::param_const_iterator PI = Decl->param_begin(), E =
- Decl->param_end(); PI != E; ++PI) {
- ParmVarDecl *PVDecl = *PI;
+ for (auto PVDecl : Decl->params()) {
QualType PType = PVDecl->getOriginalType();
if (const ArrayType *AT =
dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
@@ -4797,12 +4835,11 @@
bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
std::string& S) {
// Encode result type.
- getObjCEncodingForType(Decl->getResultType(), S);
+ getObjCEncodingForType(Decl->getReturnType(), S);
CharUnits ParmOffset;
// Compute size of all parameters.
- for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
- E = Decl->param_end(); PI != E; ++PI) {
- QualType PType = (*PI)->getType();
+ for (auto PI : Decl->params()) {
+ QualType PType = PI->getType();
CharUnits sz = getObjCEncodingTypeSize(PType);
if (sz.isZero())
continue;
@@ -4815,9 +4852,7 @@
ParmOffset = CharUnits::Zero();
// Argument types.
- for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
- E = Decl->param_end(); PI != E; ++PI) {
- ParmVarDecl *PVDecl = *PI;
+ for (auto PVDecl : Decl->params()) {
QualType PType = PVDecl->getOriginalType();
if (const ArrayType *AT =
dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
@@ -4859,8 +4894,8 @@
bool Extended) const {
// FIXME: This is not very efficient.
// Encode return type.
- getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
- Decl->getResultType(), S, Extended);
+ getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
+ Decl->getReturnType(), S, Extended);
// Compute size of all parameters.
// Start with computing size of a pointer in number of bytes.
// FIXME: There might(should) be a better way of doing this computation!
@@ -4907,6 +4942,26 @@
return false;
}
+ObjCPropertyImplDecl *
+ASTContext::getObjCPropertyImplDeclForPropertyDecl(
+ const ObjCPropertyDecl *PD,
+ const Decl *Container) const {
+ if (!Container)
+ return 0;
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(Container)) {
+ for (auto *PID : CID->property_impls())
+ if (PID->getPropertyDecl() == PD)
+ return PID;
+ } else {
+ const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container);
+ for (auto *PID : OID->property_impls())
+ if (PID->getPropertyDecl() == PD)
+ return PID;
+ }
+ return 0;
+}
+
/// getObjCEncodingForPropertyDecl - Return the encoded type for this
/// property declaration. If non-NULL, Container must be either an
/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
@@ -4939,37 +4994,12 @@
bool Dynamic = false;
ObjCPropertyImplDecl *SynthesizePID = 0;
- // FIXME: Duplicated code due to poor abstraction.
- if (Container) {
- if (const ObjCCategoryImplDecl *CID =
- dyn_cast<ObjCCategoryImplDecl>(Container)) {
- for (ObjCCategoryImplDecl::propimpl_iterator
- i = CID->propimpl_begin(), e = CID->propimpl_end();
- i != e; ++i) {
- ObjCPropertyImplDecl *PID = *i;
- if (PID->getPropertyDecl() == PD) {
- if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
- Dynamic = true;
- } else {
- SynthesizePID = PID;
- }
- }
- }
- } else {
- const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container);
- for (ObjCCategoryImplDecl::propimpl_iterator
- i = OID->propimpl_begin(), e = OID->propimpl_end();
- i != e; ++i) {
- ObjCPropertyImplDecl *PID = *i;
- if (PID->getPropertyDecl() == PD) {
- if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
- Dynamic = true;
- } else {
- SynthesizePID = PID;
- }
- }
- }
- }
+ if (ObjCPropertyImplDecl *PropertyImpDecl =
+ getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
+ if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ Dynamic = true;
+ else
+ SynthesizePID = PropertyImpDecl;
}
// FIXME: This is not very efficient.
@@ -4988,6 +5018,8 @@
S += ",C";
if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain)
S += ",&";
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
+ S += ",W";
} else {
switch (PD->getSetterKind()) {
case ObjCPropertyDecl::Assign: break;
@@ -5322,9 +5354,7 @@
if (!RDecl->isUnion()) {
getObjCEncodingForStructureImpl(RDecl, S, FD);
} else {
- for (RecordDecl::field_iterator Field = RDecl->field_begin(),
- FieldEnd = RDecl->field_end();
- Field != FieldEnd; ++Field) {
+ for (const auto *Field : RDecl->fields()) {
if (FD) {
S += '"';
S += Field->getNameAsString();
@@ -5334,7 +5364,7 @@
// Special case bit-fields.
if (Field->isBitField()) {
getObjCEncodingForTypeImpl(Field->getType(), S, false, true,
- *Field);
+ Field);
} else {
QualType qt = Field->getType();
getLegacyIntegralTypeEncoding(qt);
@@ -5358,37 +5388,38 @@
S += '<';
// Block return type
- getObjCEncodingForTypeImpl(FT->getResultType(), S,
- ExpandPointedToStructures, ExpandStructures,
- FD,
- false /* OutermostType */,
- EncodingProperty,
- false /* StructField */,
- EncodeBlockParameters,
- EncodeClassNames);
+ getObjCEncodingForTypeImpl(
+ FT->getReturnType(), S, ExpandPointedToStructures, ExpandStructures,
+ FD, false /* OutermostType */, EncodingProperty,
+ false /* StructField */, EncodeBlockParameters, EncodeClassNames);
// Block self
S += "@?";
// Block parameters
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
- for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin(),
- E = FPT->arg_type_end(); I && (I != E); ++I) {
- getObjCEncodingForTypeImpl(*I, S,
- ExpandPointedToStructures,
- ExpandStructures,
- FD,
- false /* OutermostType */,
- EncodingProperty,
- false /* StructField */,
- EncodeBlockParameters,
- EncodeClassNames);
- }
+ for (const auto &I : FPT->param_types())
+ getObjCEncodingForTypeImpl(
+ I, S, ExpandPointedToStructures, ExpandStructures, FD,
+ false /* OutermostType */, EncodingProperty,
+ false /* StructField */, EncodeBlockParameters, EncodeClassNames);
}
S += '>';
}
return;
}
- case Type::ObjCObject:
+ case Type::ObjCObject: {
+ // hack to match legacy encoding of *id and *Class
+ QualType Ty = getObjCObjectPointerType(CT);
+ if (Ty->isObjCIdType()) {
+ S += "{objc_object=}";
+ return;
+ }
+ else if (Ty->isObjCClassType()) {
+ S += "{objc_class=}";
+ return;
+ }
+ }
+
case Type::ObjCInterface: {
// Ignore protocol qualifiers when mangling at this level.
T = T->castAs<ObjCObjectType>()->getBaseType();
@@ -5440,10 +5471,9 @@
// Note that we do extended encoding of protocol qualifer list
// Only when doing ivar or property encoding.
S += '"';
- for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
- E = OPT->qual_end(); I != E; ++I) {
+ for (const auto *I : OPT->quals()) {
S += '<';
- S += (*I)->getNameAsString();
+ S += I->getNameAsString();
S += '>';
}
S += '"';
@@ -5486,10 +5516,9 @@
(FD || EncodingProperty || EncodeClassNames)) {
S += '"';
S += OPT->getInterfaceDecl()->getIdentifier()->getName();
- for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
- E = OPT->qual_end(); I != E; ++I) {
+ for (const auto *I : OPT->quals()) {
S += '<';
- S += (*I)->getNameAsString();
+ S += I->getNameAsString();
S += '>';
}
S += '"';
@@ -5542,11 +5571,9 @@
const ASTRecordLayout &layout = getASTRecordLayout(RDecl);
if (CXXRec) {
- for (CXXRecordDecl::base_class_iterator
- BI = CXXRec->bases_begin(),
- BE = CXXRec->bases_end(); BI != BE; ++BI) {
- if (!BI->isVirtual()) {
- CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
+ for (const auto &BI : CXXRec->bases()) {
+ if (!BI.isVirtual()) {
+ CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
if (base->isEmpty())
continue;
uint64_t offs = toBits(layout.getBaseClassOffset(base));
@@ -5566,10 +5593,8 @@
}
if (CXXRec && includeVBases) {
- for (CXXRecordDecl::base_class_iterator
- BI = CXXRec->vbases_begin(),
- BE = CXXRec->vbases_end(); BI != BE; ++BI) {
- CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
+ for (const auto &BI : CXXRec->vbases()) {
+ CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
if (base->isEmpty())
continue;
uint64_t offs = toBits(layout.getVBaseClassOffset(base));
@@ -5587,7 +5612,9 @@
size = layout.getSize();
}
+#ifndef NDEBUG
uint64_t CurOffs = 0;
+#endif
std::multimap<uint64_t, NamedDecl *>::iterator
CurLayObj = FieldOrBaseOffsets.begin();
@@ -5601,7 +5628,9 @@
S += '"';
}
S += "^^?";
+#ifndef NDEBUG
CurOffs += getTypeSize(VoidPtrTy);
+#endif
}
if (!RDecl->hasFlexibleArrayMember()) {
@@ -5612,8 +5641,8 @@
}
for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
+#ifndef NDEBUG
assert(CurOffs <= CurLayObj->first);
-
if (CurOffs < CurLayObj->first) {
uint64_t padding = CurLayObj->first - CurOffs;
// FIXME: There doesn't seem to be a way to indicate in the encoding that
@@ -5625,6 +5654,7 @@
// longer then though.
CurOffs += padding;
}
+#endif
NamedDecl *dcl = CurLayObj->second;
if (dcl == 0)
@@ -5637,7 +5667,9 @@
// making the encoding type bigger than it really is.
getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false);
assert(!base->isEmpty());
+#ifndef NDEBUG
CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
+#endif
} else {
FieldDecl *field = cast<FieldDecl>(dcl);
if (FD) {
@@ -5648,7 +5680,9 @@
if (field->isBitField()) {
EncodeBitField(this, S, field->getType(), field);
+#ifndef NDEBUG
CurOffs += field->getBitWidthValue(*this);
+#endif
} else {
QualType qt = field->getType();
getLegacyIntegralTypeEncoding(qt);
@@ -5656,7 +5690,9 @@
/*OutermostType*/false,
/*EncodingProperty*/false,
/*StructField*/true);
+#ifndef NDEBUG
CurOffs += getTypeSize(field->getType());
+#endif
}
}
}
@@ -5682,24 +5718,15 @@
if (!ObjCIdDecl) {
QualType T = getObjCObjectType(ObjCBuiltinIdTy, 0, 0);
T = getObjCObjectPointerType(T);
- TypeSourceInfo *IdInfo = getTrivialTypeSourceInfo(T);
- ObjCIdDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
- getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Idents.get("id"), IdInfo);
+ ObjCIdDecl = buildImplicitTypedef(T, "id");
}
-
return ObjCIdDecl;
}
TypedefDecl *ASTContext::getObjCSelDecl() const {
if (!ObjCSelDecl) {
- QualType SelT = getPointerType(ObjCBuiltinSelTy);
- TypeSourceInfo *SelInfo = getTrivialTypeSourceInfo(SelT);
- ObjCSelDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
- getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Idents.get("SEL"), SelInfo);
+ QualType T = getPointerType(ObjCBuiltinSelTy);
+ ObjCSelDecl = buildImplicitTypedef(T, "SEL");
}
return ObjCSelDecl;
}
@@ -5708,13 +5735,8 @@
if (!ObjCClassDecl) {
QualType T = getObjCObjectType(ObjCBuiltinClassTy, 0, 0);
T = getObjCObjectPointerType(T);
- TypeSourceInfo *ClassInfo = getTrivialTypeSourceInfo(T);
- ObjCClassDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
- getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Idents.get("Class"), ClassInfo);
+ ObjCClassDecl = buildImplicitTypedef(T, "Class");
}
-
return ObjCClassDecl;
}
@@ -5737,37 +5759,20 @@
static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
// typedef char* __builtin_va_list;
- QualType CharPtrType = Context->getPointerType(Context->CharTy);
- TypeSourceInfo *TInfo
- = Context->getTrivialTypeSourceInfo(CharPtrType);
-
- TypedefDecl *VaListTypeDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__builtin_va_list"),
- TInfo);
- return VaListTypeDecl;
+ QualType T = Context->getPointerType(Context->CharTy);
+ return Context->buildImplicitTypedef(T, "__builtin_va_list");
}
static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
// typedef void* __builtin_va_list;
- QualType VoidPtrType = Context->getPointerType(Context->VoidTy);
- TypeSourceInfo *TInfo
- = Context->getTrivialTypeSourceInfo(VoidPtrType);
-
- TypedefDecl *VaListTypeDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__builtin_va_list"),
- TInfo);
- return VaListTypeDecl;
+ QualType T = Context->getPointerType(Context->VoidTy);
+ return Context->buildImplicitTypedef(T, "__builtin_va_list");
}
static TypedefDecl *
CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
- RecordDecl *VaListTagDecl;
+ // struct __va_list
+ RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list");
if (Context->getLangOpts().CPlusPlus) {
// namespace std { struct __va_list {
NamespaceDecl *NS;
@@ -5776,17 +5781,8 @@
/*Inline*/false, SourceLocation(),
SourceLocation(), &Context->Idents.get("std"),
/*PrevDecl*/0);
-
- VaListTagDecl = CXXRecordDecl::Create(*Context, TTK_Struct,
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__va_list"));
+ NS->setImplicit();
VaListTagDecl->setDeclContext(NS);
- } else {
- // struct __va_list
- VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct,
- Context->getTranslationUnitDecl(),
- &Context->Idents.get("__va_list"));
}
VaListTagDecl->startDefinition();
@@ -5834,23 +5830,14 @@
Context->VaListTagTy = VaListTagType;
// } __builtin_va_list;
- TypedefDecl *VaListTypedefDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__builtin_va_list"),
- Context->getTrivialTypeSourceInfo(VaListTagType));
-
- return VaListTypedefDecl;
+ return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list");
}
static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
// typedef struct __va_list_tag {
RecordDecl *VaListTagDecl;
- VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct,
- Context->getTranslationUnitDecl(),
- &Context->Idents.get("__va_list_tag"));
+ VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
VaListTagDecl->startDefinition();
const size_t NumFields = 5;
@@ -5895,12 +5882,9 @@
Context->VaListTagTy = VaListTagType;
// } __va_list_tag;
- TypedefDecl *VaListTagTypedefDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__va_list_tag"),
- Context->getTrivialTypeSourceInfo(VaListTagType));
+ TypedefDecl *VaListTagTypedefDecl =
+ Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
+
QualType VaListTagTypedefType =
Context->getTypedefType(VaListTagTypedefDecl);
@@ -5909,25 +5893,14 @@
QualType VaListTagArrayType
= Context->getConstantArrayType(VaListTagTypedefType,
Size, ArrayType::Normal, 0);
- TypeSourceInfo *TInfo
- = Context->getTrivialTypeSourceInfo(VaListTagArrayType);
- TypedefDecl *VaListTypedefDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__builtin_va_list"),
- TInfo);
-
- return VaListTypedefDecl;
+ return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
static TypedefDecl *
CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
// typedef struct __va_list_tag {
RecordDecl *VaListTagDecl;
- VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct,
- Context->getTranslationUnitDecl(),
- &Context->Idents.get("__va_list_tag"));
+ VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
VaListTagDecl->startDefinition();
const size_t NumFields = 4;
@@ -5969,12 +5942,9 @@
Context->VaListTagTy = VaListTagType;
// } __va_list_tag;
- TypedefDecl *VaListTagTypedefDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__va_list_tag"),
- Context->getTrivialTypeSourceInfo(VaListTagType));
+ TypedefDecl *VaListTagTypedefDecl =
+ Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
+
QualType VaListTagTypedefType =
Context->getTypedefType(VaListTagTypedefDecl);
@@ -5983,16 +5953,7 @@
QualType VaListTagArrayType
= Context->getConstantArrayType(VaListTagTypedefType,
Size, ArrayType::Normal,0);
- TypeSourceInfo *TInfo
- = Context->getTrivialTypeSourceInfo(VaListTagArrayType);
- TypedefDecl *VaListTypedefDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__builtin_va_list"),
- TInfo);
-
- return VaListTypedefDecl;
+ return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
@@ -6001,19 +5962,13 @@
QualType IntArrayType
= Context->getConstantArrayType(Context->IntTy,
Size, ArrayType::Normal, 0);
- TypedefDecl *VaListTypedefDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__builtin_va_list"),
- Context->getTrivialTypeSourceInfo(IntArrayType));
-
- return VaListTypedefDecl;
+ return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list");
}
static TypedefDecl *
CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
- RecordDecl *VaListDecl;
+ // struct __va_list
+ RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list");
if (Context->getLangOpts().CPlusPlus) {
// namespace std { struct __va_list {
NamespaceDecl *NS;
@@ -6022,19 +5977,8 @@
/*Inline*/false, SourceLocation(),
SourceLocation(), &Context->Idents.get("std"),
/*PrevDecl*/0);
-
- VaListDecl = CXXRecordDecl::Create(*Context, TTK_Struct,
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__va_list"));
-
+ NS->setImplicit();
VaListDecl->setDeclContext(NS);
-
- } else {
- // struct __va_list {
- VaListDecl = CreateRecordDecl(*Context, TTK_Struct,
- Context->getTranslationUnitDecl(),
- &Context->Idents.get("__va_list"));
}
VaListDecl->startDefinition();
@@ -6057,26 +6001,15 @@
VaListDecl->completeDefinition();
// typedef struct __va_list __builtin_va_list;
- TypeSourceInfo *TInfo
- = Context->getTrivialTypeSourceInfo(Context->getRecordType(VaListDecl));
-
- TypedefDecl *VaListTypeDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__builtin_va_list"),
- TInfo);
-
- return VaListTypeDecl;
+ QualType T = Context->getRecordType(VaListDecl);
+ return Context->buildImplicitTypedef(T, "__builtin_va_list");
}
static TypedefDecl *
CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
// typedef struct __va_list_tag {
RecordDecl *VaListTagDecl;
- VaListTagDecl = CreateRecordDecl(*Context, TTK_Struct,
- Context->getTranslationUnitDecl(),
- &Context->Idents.get("__va_list_tag"));
+ VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
VaListTagDecl->startDefinition();
const size_t NumFields = 4;
@@ -6118,12 +6051,8 @@
Context->VaListTagTy = VaListTagType;
// } __va_list_tag;
- TypedefDecl *VaListTagTypedefDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__va_list_tag"),
- Context->getTrivialTypeSourceInfo(VaListTagType));
+ TypedefDecl *VaListTagTypedefDecl =
+ Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
QualType VaListTagTypedefType =
Context->getTypedefType(VaListTagTypedefDecl);
@@ -6132,16 +6061,8 @@
QualType VaListTagArrayType
= Context->getConstantArrayType(VaListTagTypedefType,
Size, ArrayType::Normal,0);
- TypeSourceInfo *TInfo
- = Context->getTrivialTypeSourceInfo(VaListTagArrayType);
- TypedefDecl *VaListTypedefDecl
- = TypedefDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__builtin_va_list"),
- TInfo);
- return VaListTypedefDecl;
+ return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
@@ -6169,8 +6090,10 @@
}
TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
- if (!BuiltinVaListDecl)
+ if (!BuiltinVaListDecl) {
BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind());
+ assert(BuiltinVaListDecl->isImplicit());
+ }
return BuiltinVaListDecl;
}
@@ -6454,9 +6377,8 @@
ObjCProtocolDecl *rProto) const {
if (declaresSameEntity(lProto, rProto))
return true;
- for (ObjCProtocolDecl::protocol_iterator PI = rProto->protocol_begin(),
- E = rProto->protocol_end(); PI != E; ++PI)
- if (ProtocolCompatibleWithProtocol(lProto, *PI))
+ for (auto *PI : rProto->protocols())
+ if (ProtocolCompatibleWithProtocol(lProto, PI))
return true;
return false;
}
@@ -6469,13 +6391,9 @@
const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
assert ((lhsQID && rhsOPT) && "ObjCQualifiedClassTypesAreCompatible");
- for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
- E = lhsQID->qual_end(); I != E; ++I) {
+ for (auto *lhsProto : lhsQID->quals()) {
bool match = false;
- ObjCProtocolDecl *lhsProto = *I;
- for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(),
- E = rhsOPT->qual_end(); J != E; ++J) {
- ObjCProtocolDecl *rhsProto = *J;
+ for (auto *rhsProto : rhsOPT->quals()) {
if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
match = true;
break;
@@ -6508,12 +6426,11 @@
// If the RHS is a unqualified interface pointer "NSString*",
// make sure we check the class hierarchy.
if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
- for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
- E = lhsQID->qual_end(); I != E; ++I) {
+ for (auto *I : lhsQID->quals()) {
// when comparing an id<P> on lhs with a static type on rhs,
// see if static class implements all of id's protocols, directly or
// through its super class and categories.
- if (!rhsID->ClassImplementsProtocol(*I, true))
+ if (!rhsID->ClassImplementsProtocol(I, true))
return false;
}
}
@@ -6521,17 +6438,13 @@
return true;
}
// Both the right and left sides have qualifiers.
- for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
- E = lhsQID->qual_end(); I != E; ++I) {
- ObjCProtocolDecl *lhsProto = *I;
+ for (auto *lhsProto : lhsQID->quals()) {
bool match = false;
// when comparing an id<P> on lhs with a static type on rhs,
// see if static class implements all of id's protocols, directly or
// through its super class and categories.
- for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(),
- E = rhsOPT->qual_end(); J != E; ++J) {
- ObjCProtocolDecl *rhsProto = *J;
+ for (auto *rhsProto : rhsOPT->quals()) {
if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
(compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
match = true;
@@ -6541,12 +6454,11 @@
// If the RHS is a qualified interface pointer "NSString<P>*",
// make sure we check the class hierarchy.
if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
- for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
- E = lhsQID->qual_end(); I != E; ++I) {
+ for (auto *I : lhsQID->quals()) {
// when comparing an id<P> on lhs with a static type on rhs,
// see if static class implements all of id's protocols, directly or
// through its super class and categories.
- if (rhsID->ClassImplementsProtocol(*I, true)) {
+ if (rhsID->ClassImplementsProtocol(I, true)) {
match = true;
break;
}
@@ -6565,9 +6477,7 @@
if (const ObjCObjectPointerType *lhsOPT =
lhs->getAsObjCInterfacePointerType()) {
// If both the right and left sides have qualifiers.
- for (ObjCObjectPointerType::qual_iterator I = lhsOPT->qual_begin(),
- E = lhsOPT->qual_end(); I != E; ++I) {
- ObjCProtocolDecl *lhsProto = *I;
+ for (auto *lhsProto : lhsOPT->quals()) {
bool match = false;
// when comparing an id<P> on rhs with a static type on lhs,
@@ -6575,9 +6485,7 @@
// through its super class and categories.
// First, lhs protocols in the qualifier list must be found, direct
// or indirect in rhs's qualifier list or it is a mismatch.
- for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(),
- E = rhsQID->qual_end(); J != E; ++J) {
- ObjCProtocolDecl *rhsProto = *J;
+ for (auto *rhsProto : rhsQID->quals()) {
if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
(compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
match = true;
@@ -6598,14 +6506,9 @@
// assume that it is mismatch.
if (LHSInheritedProtocols.empty() && lhsOPT->qual_empty())
return false;
- for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
- LHSInheritedProtocols.begin(),
- E = LHSInheritedProtocols.end(); I != E; ++I) {
+ for (auto *lhsProto : LHSInheritedProtocols) {
bool match = false;
- ObjCProtocolDecl *lhsProto = (*I);
- for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(),
- E = rhsQID->qual_end(); J != E; ++J) {
- ObjCProtocolDecl *rhsProto = *J;
+ for (auto *rhsProto : rhsQID->quals()) {
if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
(compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
match = true;
@@ -6798,16 +6701,9 @@
if (SuperClassInheritedProtocols.empty())
return false;
- for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(),
- LHSPE = LHS->qual_end();
- LHSPI != LHSPE; LHSPI++) {
- bool SuperImplementsProtocol = false;
- ObjCProtocolDecl *LHSProto = (*LHSPI);
-
- for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
- SuperClassInheritedProtocols.begin(),
- E = SuperClassInheritedProtocols.end(); I != E; ++I) {
- ObjCProtocolDecl *SuperClassProto = (*I);
+ for (const auto *LHSProto : LHS->quals()) {
+ bool SuperImplementsProtocol = false;
+ for (auto *SuperClassProto : SuperClassInheritedProtocols) {
if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
SuperImplementsProtocol = true;
break;
@@ -6821,17 +6717,13 @@
return false;
}
- for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(),
- LHSPE = LHS->qual_end();
- LHSPI != LHSPE; LHSPI++) {
+ for (const auto *LHSPI : LHS->quals()) {
bool RHSImplementsProtocol = false;
// If the RHS doesn't implement the protocol on the left, the types
// are incompatible.
- for (ObjCObjectType::qual_iterator RHSPI = RHS->qual_begin(),
- RHSPE = RHS->qual_end();
- RHSPI != RHSPE; RHSPI++) {
- if ((*RHSPI)->lookupProtocolNamed((*LHSPI)->getIdentifier())) {
+ for (auto *RHSPI : RHS->quals()) {
+ if (RHSPI->lookupProtocolNamed(LHSPI->getIdentifier())) {
RHSImplementsProtocol = true;
break;
}
@@ -6891,9 +6783,8 @@
if (const RecordType *UT = T->getAsUnionType()) {
RecordDecl *UD = UT->getDecl();
if (UD->hasAttr<TransparentUnionAttr>()) {
- for (RecordDecl::field_iterator it = UD->field_begin(),
- itend = UD->field_end(); it != itend; ++it) {
- QualType ET = it->getType().getUnqualifiedType();
+ for (const auto *I : UD->fields()) {
+ QualType ET = I->getType().getUnqualifiedType();
QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
if (!MT.isNull())
return MT;
@@ -6904,11 +6795,11 @@
return QualType();
}
-/// mergeFunctionArgumentTypes - merge two types which appear as function
-/// argument types
-QualType ASTContext::mergeFunctionArgumentTypes(QualType lhs, QualType rhs,
- bool OfBlockPointer,
- bool Unqualified) {
+/// mergeFunctionParameterTypes - merge two types which appear as function
+/// parameter types
+QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
+ bool OfBlockPointer,
+ bool Unqualified) {
// GNU extension: two types are compatible if they appear as a function
// argument, one of the types is a transparent union type and the other
// type is compatible with a union member
@@ -6938,23 +6829,23 @@
// Check return type
QualType retType;
if (OfBlockPointer) {
- QualType RHS = rbase->getResultType();
- QualType LHS = lbase->getResultType();
+ QualType RHS = rbase->getReturnType();
+ QualType LHS = lbase->getReturnType();
bool UnqualifiedResult = Unqualified;
if (!UnqualifiedResult)
UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
}
else
- retType = mergeTypes(lbase->getResultType(), rbase->getResultType(), false,
+ retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false,
Unqualified);
if (retType.isNull()) return QualType();
if (Unqualified)
retType = retType.getUnqualifiedType();
- CanQualType LRetType = getCanonicalType(lbase->getResultType());
- CanQualType RRetType = getCanonicalType(rbase->getResultType());
+ CanQualType LRetType = getCanonicalType(lbase->getReturnType());
+ CanQualType RRetType = getCanonicalType(rbase->getReturnType());
if (Unqualified) {
LRetType = LRetType.getUnqualifiedType();
RRetType = RRetType.getUnqualifiedType();
@@ -6998,11 +6889,8 @@
if (lproto && rproto) { // two C99 style function prototypes
assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
"C++ shouldn't be here");
- unsigned lproto_nargs = lproto->getNumArgs();
- unsigned rproto_nargs = rproto->getNumArgs();
-
- // Compatible functions must have the same number of arguments
- if (lproto_nargs != rproto_nargs)
+ // Compatible functions must have the same number of parameters
+ if (lproto->getNumParams() != rproto->getNumParams())
return QualType();
// Variadic and non-variadic functions aren't compatible
@@ -7015,29 +6903,29 @@
if (LangOpts.ObjCAutoRefCount &&
!FunctionTypesMatchOnNSConsumedAttrs(rproto, lproto))
return QualType();
-
- // Check argument compatibility
+
+ // Check parameter type compatibility
SmallVector<QualType, 10> types;
- for (unsigned i = 0; i < lproto_nargs; i++) {
- QualType largtype = lproto->getArgType(i).getUnqualifiedType();
- QualType rargtype = rproto->getArgType(i).getUnqualifiedType();
- QualType argtype = mergeFunctionArgumentTypes(largtype, rargtype,
- OfBlockPointer,
- Unqualified);
- if (argtype.isNull()) return QualType();
-
+ for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
+ QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
+ QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
+ QualType paramType = mergeFunctionParameterTypes(
+ lParamType, rParamType, OfBlockPointer, Unqualified);
+ if (paramType.isNull())
+ return QualType();
+
if (Unqualified)
- argtype = argtype.getUnqualifiedType();
-
- types.push_back(argtype);
+ paramType = paramType.getUnqualifiedType();
+
+ types.push_back(paramType);
if (Unqualified) {
- largtype = largtype.getUnqualifiedType();
- rargtype = rargtype.getUnqualifiedType();
+ lParamType = lParamType.getUnqualifiedType();
+ rParamType = rParamType.getUnqualifiedType();
}
-
- if (getCanonicalType(argtype) != getCanonicalType(largtype))
+
+ if (getCanonicalType(paramType) != getCanonicalType(lParamType))
allLTypes = false;
- if (getCanonicalType(argtype) != getCanonicalType(rargtype))
+ if (getCanonicalType(paramType) != getCanonicalType(rParamType))
allRTypes = false;
}
@@ -7061,20 +6949,19 @@
// The only types actually affected are promotable integer
// types and floats, which would be passed as a different
// type depending on whether the prototype is visible.
- unsigned proto_nargs = proto->getNumArgs();
- for (unsigned i = 0; i < proto_nargs; ++i) {
- QualType argTy = proto->getArgType(i);
-
+ for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
+ QualType paramTy = proto->getParamType(i);
+
// Look at the converted type of enum types, since that is the type used
// to pass enum values.
- if (const EnumType *Enum = argTy->getAs<EnumType>()) {
- argTy = Enum->getDecl()->getIntegerType();
- if (argTy.isNull())
+ if (const EnumType *Enum = paramTy->getAs<EnumType>()) {
+ paramTy = Enum->getDecl()->getIntegerType();
+ if (paramTy.isNull())
return QualType();
}
-
- if (argTy->isPromotableIntegerType() ||
- getCanonicalType(argTy).getUnqualifiedType() == FloatTy)
+
+ if (paramTy->isPromotableIntegerType() ||
+ getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
return QualType();
}
@@ -7083,7 +6970,7 @@
FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
EPI.ExtInfo = einfo;
- return getFunctionType(retType, proto->getArgTypes(), EPI);
+ return getFunctionType(retType, proto->getParamTypes(), EPI);
}
if (allLTypes) return lhs;
@@ -7387,18 +7274,16 @@
bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs(
const FunctionProtoType *FromFunctionType,
const FunctionProtoType *ToFunctionType) {
- if (FromFunctionType->hasAnyConsumedArgs() !=
- ToFunctionType->hasAnyConsumedArgs())
+ if (FromFunctionType->hasAnyConsumedParams() !=
+ ToFunctionType->hasAnyConsumedParams())
return false;
FunctionProtoType::ExtProtoInfo FromEPI =
FromFunctionType->getExtProtoInfo();
FunctionProtoType::ExtProtoInfo ToEPI =
ToFunctionType->getExtProtoInfo();
- if (FromEPI.ConsumedArguments && ToEPI.ConsumedArguments)
- for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs();
- ArgIdx != NumArgs; ++ArgIdx) {
- if (FromEPI.ConsumedArguments[ArgIdx] !=
- ToEPI.ConsumedArguments[ArgIdx])
+ if (FromEPI.ConsumedParameters && ToEPI.ConsumedParameters)
+ for (unsigned i = 0, n = FromFunctionType->getNumParams(); i != n; ++i) {
+ if (FromEPI.ConsumedParameters[i] != ToEPI.ConsumedParameters[i])
return false;
}
return true;
@@ -7416,10 +7301,10 @@
if (RHSCan->isFunctionType()) {
if (!LHSCan->isFunctionType())
return QualType();
- QualType OldReturnType =
- cast<FunctionType>(RHSCan.getTypePtr())->getResultType();
+ QualType OldReturnType =
+ cast<FunctionType>(RHSCan.getTypePtr())->getReturnType();
QualType NewReturnType =
- cast<FunctionType>(LHSCan.getTypePtr())->getResultType();
+ cast<FunctionType>(LHSCan.getTypePtr())->getReturnType();
QualType ResReturnType =
mergeObjCGCQualifiers(NewReturnType, OldReturnType);
if (ResReturnType.isNull())
@@ -7432,7 +7317,7 @@
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
EPI.ExtInfo = getFunctionExtInfo(LHS);
QualType ResultType =
- getFunctionType(OldReturnType, FPT->getArgTypes(), EPI);
+ getFunctionType(OldReturnType, FPT->getParamTypes(), EPI);
return ResultType;
}
}
@@ -7573,6 +7458,19 @@
assert(HowLong <= 2 && "Can't have LLLL modifier");
++HowLong;
break;
+ case 'W':
+ // This modifier represents the int64 type.
+ assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
+ switch (Context.getTargetInfo().getInt64Type()) {
+ default:
+ llvm_unreachable("Unexpected integer type");
+ case TargetInfo::SignedLong:
+ HowLong = 1;
+ break;
+ case TargetInfo::SignedLongLong:
+ HowLong = 2;
+ break;
+ }
}
}
@@ -7834,7 +7732,7 @@
return getFunctionType(ResType, ArgTypes, EPI);
}
-GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) {
+GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
if (!FD->isExternallyVisible())
return GVA_Internal;
@@ -7846,7 +7744,7 @@
break;
case TSK_ExplicitInstantiationDefinition:
- return GVA_ExplicitTemplateInstantiation;
+ return GVA_StrongODR;
case TSK_ExplicitInstantiationDeclaration:
case TSK_ImplicitInstantiation:
@@ -7857,7 +7755,7 @@
if (!FD->isInlined())
return External;
- if ((!getLangOpts().CPlusPlus && !getLangOpts().MicrosoftMode) ||
+ if ((!getLangOpts().CPlusPlus && !getLangOpts().MSVCCompat) ||
FD->hasAttr<GNUInlineAttr>()) {
// GNU or C99 inline semantics. Determine whether this symbol should be
// externally visible.
@@ -7878,6 +7776,12 @@
== TSK_ExplicitInstantiationDeclaration)
return GVA_C99Inline;
+ // Functions specified with extern and inline in -fms-compatibility mode
+ // forcibly get emitted. While the body of the function cannot be later
+ // replaced, the function definition cannot be discarded.
+ if (FD->getMostRecentDecl()->isMSExternInline())
+ return GVA_StrongODR;
+
return GVA_CXXInline;
}
@@ -7895,7 +7799,7 @@
// Fall through to treat this like any other instantiation.
case TSK_ExplicitInstantiationDefinition:
- return GVA_ExplicitTemplateInstantiation;
+ return GVA_StrongODR;
case TSK_ImplicitInstantiation:
return GVA_TemplateInstantiation;
@@ -7996,12 +7900,23 @@
return ABI->isNearlyEmpty(RD);
}
+VTableContextBase *ASTContext::getVTableContext() {
+ if (!VTContext.get()) {
+ if (Target->getCXXABI().isMicrosoft())
+ VTContext.reset(new MicrosoftVTableContext(*this));
+ else
+ VTContext.reset(new ItaniumVTableContext(*this));
+ }
+ return VTContext.get();
+}
+
MangleContext *ASTContext::createMangleContext() {
switch (Target->getCXXABI().getKind()) {
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericItanium:
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
+ case TargetCXXABI::iOS64:
return ItaniumMangleContext::create(*this, getDiagnostics());
case TargetCXXABI::Microsoft:
return MicrosoftMangleContext::create(*this, getDiagnostics());
@@ -8071,6 +7986,17 @@
return I != MangleNumbers.end() ? I->second : 1;
}
+void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
+ if (Number > 1)
+ StaticLocalNumbers[VD] = Number;
+}
+
+unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
+ llvm::DenseMap<const VarDecl *, unsigned>::const_iterator I =
+ StaticLocalNumbers.find(VD);
+ return I != StaticLocalNumbers.end() ? I->second : 1;
+}
+
MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
@@ -8230,8 +8156,7 @@
if (MethodDecl->getObjCDeclQualifier() !=
MethodImpl->getObjCDeclQualifier())
return false;
- if (!hasSameType(MethodDecl->getResultType(),
- MethodImpl->getResultType()))
+ if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
return false;
if (MethodDecl->param_size() != MethodImpl->param_size())
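Most of the churn in the ASTContext.cpp hunks above is mechanical, tracking the FunctionType rename from argument- to parameter-based accessors (getNumArgs/getArgType become getNumParams/getParamType, getResultType becomes getReturnType, and mergeFunctionArgumentTypes becomes mergeFunctionParameterTypes). A minimal sketch against the renamed accessors, mirroring the promotable-parameter check in the hunk above; the helper name is illustrative and not part of the patch:

#include "clang/AST/Type.h"

// Illustrative helper (not part of the patch): walks a prototype with the
// renamed accessors and reports whether any parameter would be affected by
// default argument promotion, mirroring the check in mergeFunctionTypes.
static bool hasPromotableParam(const clang::FunctionProtoType *FPT) {
  for (unsigned i = 0, n = FPT->getNumParams(); i != n; ++i)
    if (FPT->getParamType(i)->isPromotableIntegerType())
      return true;
  return false;
}
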
diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp
index fce8f64..7c6bec4 100644
--- a/lib/AST/ASTDiagnostic.cpp
+++ b/lib/AST/ASTDiagnostic.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
@@ -51,6 +52,11 @@
QT = AT->desugar();
continue;
}
+ // ...or an adjusted type...
+ if (const AdjustedType *AT = dyn_cast<AdjustedType>(Ty)) {
+ QT = AT->desugar();
+ continue;
+ }
// ... or an auto type.
if (const AutoType *AT = dyn_cast<AutoType>(Ty)) {
if (!AT->isSugared())
@@ -354,6 +360,14 @@
NeedQuotes = false;
break;
}
+ case DiagnosticsEngine::ak_attr: {
+ const Attr *At = reinterpret_cast<Attr *>(Val);
+ assert(At && "Received null Attr object!");
+ OS << '\'' << At->getSpelling() << '\'';
+ NeedQuotes = false;
+ break;
+ }
+
}
OS.flush();
@@ -1349,8 +1363,7 @@
FromType.getLocalUnqualifiedType() ==
ToType.getLocalUnqualifiedType()) {
Qualifiers FromQual = FromType.getLocalQualifiers(),
- ToQual = ToType.getLocalQualifiers(),
- CommonQual;
+ ToQual = ToType.getLocalQualifiers();
PrintQualifiers(FromQual, ToQual);
FromType.getLocalUnqualifiedType().print(OS, Policy);
return;
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index 2f40255..dcfba3e 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -32,12 +32,23 @@
namespace {
// Colors used for various parts of the AST dump
+ // Do not use bold yellow for any text. It is hard to read on white screens.
struct TerminalColor {
raw_ostream::Colors Color;
bool Bold;
};
+ // Red - CastColor
+ // Green - TypeColor
+ // Bold Green - DeclKindNameColor, UndeserializedColor
+ // Yellow - AddressColor, LocationColor
+ // Blue - CommentColor, NullColor, IndentColor
+ // Bold Blue - AttrColor
+ // Bold Magenta - StmtColor
+ // Cyan - ValueKindColor, ObjectKindColor
+ // Bold Cyan - ValueColor, DeclNameColor
+
// Decl kind names (VarDecl, FunctionDecl, etc)
static const TerminalColor DeclKindNameColor = { raw_ostream::GREEN, true };
// Attr names (CleanupAttr, GuardedByAttr, etc)
@@ -45,7 +56,7 @@
// Statement names (DeclStmt, ImplicitCastExpr, etc)
static const TerminalColor StmtColor = { raw_ostream::MAGENTA, true };
// Comment names (FullComment, ParagraphComment, TextComment, etc)
- static const TerminalColor CommentColor = { raw_ostream::YELLOW, true };
+ static const TerminalColor CommentColor = { raw_ostream::BLUE, false };
// Type names (int, float, etc, plus user defined types)
static const TerminalColor TypeColor = { raw_ostream::GREEN, false };
@@ -138,6 +149,38 @@
}
};
+ class ChildDumper {
+ ASTDumper &Dumper;
+
+ const Decl *Prev;
+ bool PrevRef;
+ public:
+ ChildDumper(ASTDumper &Dumper) : Dumper(Dumper), Prev(0) {}
+ ~ChildDumper() {
+ if (Prev) {
+ Dumper.lastChild();
+ dump(0);
+ }
+ }
+
+ // FIXME: This should take an arbitrary callable as the dumping action.
+ void dump(const Decl *D, bool Ref = false) {
+ if (Prev) {
+ if (PrevRef)
+ Dumper.dumpDeclRef(Prev);
+ else
+ Dumper.dumpDecl(Prev);
+ }
+ Prev = D;
+ PrevRef = Ref;
+ }
+ void dumpRef(const Decl *D) { dump(D, true); }
+
+ // Give up ownership of the children of the node. By calling this,
+ // the caller takes back responsibility for calling lastChild().
+ void release() { dump(0); }
+ };
+
public:
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM)
@@ -211,6 +254,13 @@
void VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D);
void VisitCXXRecordDecl(const CXXRecordDecl *D);
void VisitStaticAssertDecl(const StaticAssertDecl *D);
+ template<typename SpecializationDecl>
+ void VisitTemplateDeclSpecialization(ChildDumper &Children,
+ const SpecializationDecl *D,
+ bool DumpExplicitInst,
+ bool DumpRefOnly);
+ template<typename TemplateDecl>
+ void VisitTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst);
void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D);
void VisitClassTemplateDecl(const ClassTemplateDecl *D);
void VisitClassTemplateSpecializationDecl(
@@ -500,17 +550,14 @@
void ASTDumper::dumpDeclContext(const DeclContext *DC) {
if (!DC)
return;
- bool HasUndeserializedDecls = DC->hasExternalLexicalStorage();
- for (DeclContext::decl_iterator I = DC->noload_decls_begin(),
- E = DC->noload_decls_end();
- I != E; ++I) {
- DeclContext::decl_iterator Next = I;
- ++Next;
- if (Next == E && !HasUndeserializedDecls)
- lastChild();
- dumpDecl(*I);
- }
- if (HasUndeserializedDecls) {
+
+ ChildDumper Children(*this);
+ for (auto *D : DC->noload_decls())
+ Children.dump(D);
+
+ if (DC->hasExternalLexicalStorage()) {
+ Children.release();
+
lastChild();
IndentScope Indent(*this);
ColorScope Color(*this, UndeserializedColor);
@@ -569,6 +616,7 @@
IndentScope Indent(*this);
{
ColorScope Color(*this, AttrColor);
+
switch (A->getKind()) {
#define ATTR(X) case attr::X: OS << #X; break;
#include "clang/Basic/AttrList.inc"
@@ -579,6 +627,8 @@
dumpPointer(A);
dumpSourceRange(A->getRange());
#include "clang/AST/AttrDump.inc"
+ if (A->isImplicit())
+ OS << " Implicit";
}
static void dumpPreviousDeclImpl(raw_ostream &OS, ...) {}
@@ -742,13 +792,15 @@
OS << " parent " << cast<Decl>(D->getDeclContext());
dumpPreviousDecl(OS, D);
dumpSourceRange(D->getSourceRange());
+ OS << ' ';
+ dumpLocation(D->getLocation());
if (Module *M = D->getOwningModule())
OS << " in " << M->getFullModuleName();
if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
if (ND->isHidden())
OS << " hidden";
- bool HasAttrs = D->attr_begin() != D->attr_end();
+ bool HasAttrs = D->hasAttrs();
const FullComment *Comment =
D->getASTContext().getLocalCommentForDeclUncached(D);
// Decls within functions are visited by the body
@@ -824,13 +876,10 @@
void ASTDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) {
dumpName(D);
dumpType(D->getType());
- for (IndirectFieldDecl::chain_iterator I = D->chain_begin(),
- E = D->chain_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
- dumpDeclRef(*I);
- }
+
+ ChildDumper Children(*this);
+ for (auto *Child : D->chain())
+ Children.dumpRef(Child);
}
void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
@@ -1016,15 +1065,13 @@
if (!D->isCompleteDefinition())
return;
- for (CXXRecordDecl::base_class_const_iterator I = D->bases_begin(),
- E = D->bases_end();
- I != E; ++I) {
+ for (const auto &I : D->bases()) {
IndentScope Indent(*this);
- if (I->isVirtual())
+ if (I.isVirtual())
OS << "virtual ";
- dumpAccessSpecifier(I->getAccessSpecifier());
- dumpType(I->getType());
- if (I->isPackExpansion())
+ dumpAccessSpecifier(I.getAccessSpecifier());
+ dumpType(I.getType());
+ if (I.isPackExpansion())
OS << "...";
}
}
@@ -1035,63 +1082,69 @@
dumpStmt(D->getMessage());
}
-void ASTDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
- dumpName(D);
- dumpTemplateParameters(D->getTemplateParameters());
- dumpDecl(D->getTemplatedDecl());
- for (FunctionTemplateDecl::spec_iterator I = D->spec_begin(),
- E = D->spec_end();
- I != E; ++I) {
- FunctionTemplateDecl::spec_iterator Next = I;
- ++Next;
- if (Next == E)
- lastChild();
- switch (I->getTemplateSpecializationKind()) {
- case TSK_Undeclared:
- case TSK_ImplicitInstantiation:
+template<typename SpecializationDecl>
+void ASTDumper::VisitTemplateDeclSpecialization(ChildDumper &Children,
+ const SpecializationDecl *D,
+ bool DumpExplicitInst,
+ bool DumpRefOnly) {
+ bool DumpedAny = false;
+ for (auto *RedeclWithBadType : D->redecls()) {
+ // FIXME: The redecls() range sometimes has elements of a less-specific
+ // type. (In particular, ClassTemplateSpecializationDecl::redecls() gives
+ // us TagDecls, and should give CXXRecordDecls).
+ auto *Redecl = dyn_cast<SpecializationDecl>(RedeclWithBadType);
+ if (!Redecl) {
+ // Found the injected-class-name for a class template. This will be dumped
+ // as part of its surrounding class so we don't need to dump it here.
+ assert(isa<CXXRecordDecl>(RedeclWithBadType) &&
+ "expected an injected-class-name");
+ continue;
+ }
+
+ switch (Redecl->getTemplateSpecializationKind()) {
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
- if (D == D->getCanonicalDecl())
- dumpDecl(*I);
- else
- dumpDeclRef(*I);
+ if (!DumpExplicitInst)
+ break;
+ // Fall through.
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation:
+ Children.dump(Redecl, DumpRefOnly);
+ DumpedAny = true;
break;
case TSK_ExplicitSpecialization:
- dumpDeclRef(*I);
break;
}
}
+
+ // Ensure we dump at least one decl for each specialization.
+ if (!DumpedAny)
+ Children.dumpRef(D);
+}
+
+template<typename TemplateDecl>
+void ASTDumper::VisitTemplateDecl(const TemplateDecl *D,
+ bool DumpExplicitInst) {
+ dumpName(D);
+ dumpTemplateParameters(D->getTemplateParameters());
+
+ ChildDumper Children(*this);
+ Children.dump(D->getTemplatedDecl());
+
+ for (auto *Child : D->specializations())
+ VisitTemplateDeclSpecialization(Children, Child, DumpExplicitInst,
+ !D->isCanonicalDecl());
+}
+
+void ASTDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
+ // FIXME: We don't add a declaration of a function template specialization
+ // to its context when it's explicitly instantiated, so dump explicit
+ // instantiations when we dump the template itself.
+ VisitTemplateDecl(D, true);
}
void ASTDumper::VisitClassTemplateDecl(const ClassTemplateDecl *D) {
- dumpName(D);
- dumpTemplateParameters(D->getTemplateParameters());
-
- ClassTemplateDecl::spec_iterator I = D->spec_begin();
- ClassTemplateDecl::spec_iterator E = D->spec_end();
- if (I == E)
- lastChild();
- dumpDecl(D->getTemplatedDecl());
- for (; I != E; ++I) {
- ClassTemplateDecl::spec_iterator Next = I;
- ++Next;
- if (Next == E)
- lastChild();
- switch (I->getTemplateSpecializationKind()) {
- case TSK_Undeclared:
- case TSK_ImplicitInstantiation:
- if (D == D->getCanonicalDecl())
- dumpDecl(*I);
- else
- dumpDeclRef(*I);
- break;
- case TSK_ExplicitSpecialization:
- case TSK_ExplicitInstantiationDeclaration:
- case TSK_ExplicitInstantiationDefinition:
- dumpDeclRef(*I);
- break;
- }
- }
+ VisitTemplateDecl(D, false);
}
void ASTDumper::VisitClassTemplateSpecializationDecl(
@@ -1114,34 +1167,7 @@
}
void ASTDumper::VisitVarTemplateDecl(const VarTemplateDecl *D) {
- dumpName(D);
- dumpTemplateParameters(D->getTemplateParameters());
-
- VarTemplateDecl::spec_iterator I = D->spec_begin();
- VarTemplateDecl::spec_iterator E = D->spec_end();
- if (I == E)
- lastChild();
- dumpDecl(D->getTemplatedDecl());
- for (; I != E; ++I) {
- VarTemplateDecl::spec_iterator Next = I;
- ++Next;
- if (Next == E)
- lastChild();
- switch (I->getTemplateSpecializationKind()) {
- case TSK_Undeclared:
- case TSK_ImplicitInstantiation:
- if (D == D->getCanonicalDecl())
- dumpDecl(*I);
- else
- dumpDeclRef(*I);
- break;
- case TSK_ExplicitSpecialization:
- case TSK_ExplicitInstantiationDeclaration:
- case TSK_ExplicitInstantiationDefinition:
- dumpDeclRef(*I);
- break;
- }
- }
+ VisitTemplateDecl(D, false);
}
void ASTDumper::VisitVarTemplateSpecializationDecl(
@@ -1164,8 +1190,10 @@
if (D->isParameterPack())
OS << " ...";
dumpName(D);
- if (D->hasDefaultArgument())
- dumpType(D->getDefaultArgument());
+ if (D->hasDefaultArgument()) {
+ lastChild();
+ dumpTemplateArgument(D->getDefaultArgument());
+ }
}
void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
@@ -1173,8 +1201,10 @@
if (D->isParameterPack())
OS << " ...";
dumpName(D);
- if (D->hasDefaultArgument())
- dumpStmt(D->getDefaultArgument());
+ if (D->hasDefaultArgument()) {
+ lastChild();
+ dumpTemplateArgument(D->getDefaultArgument());
+ }
}
void ASTDumper::VisitTemplateTemplateParmDecl(
@@ -1183,8 +1213,10 @@
OS << " ...";
dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
- if (D->hasDefaultArgument())
+ if (D->hasDefaultArgument()) {
+ lastChild();
dumpTemplateArgumentLoc(D->getDefaultArgument());
+ }
}
void ASTDumper::VisitUsingDecl(const UsingDecl *D) {
@@ -1241,8 +1273,6 @@
dumpType(D->getType());
if (D->getSynthesize())
OS << " synthesize";
- if (D->getBackingIvarReferencedInAccessor())
- OS << " BackingIvarReferencedInAccessor";
switch (D->getAccessControl()) {
case ObjCIvarDecl::None:
@@ -1269,7 +1299,7 @@
else
OS << " +";
dumpName(D);
- dumpType(D->getResultType());
+ dumpType(D->getReturnType());
bool OldMoreChildren = hasMoreChildren();
bool IsVariadic = D->isVariadic();
@@ -1327,28 +1357,20 @@
void ASTDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
dumpName(D);
- for (ObjCProtocolDecl::protocol_iterator I = D->protocol_begin(),
- E = D->protocol_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
- dumpDeclRef(*I);
- }
+
+ ChildDumper Children(*this);
+ for (auto *Child : D->protocols())
+ Children.dumpRef(Child);
}
void ASTDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
dumpName(D);
dumpDeclRef(D->getSuperClass(), "super");
- if (D->protocol_begin() == D->protocol_end())
- lastChild();
- dumpDeclRef(D->getImplementation());
- for (ObjCInterfaceDecl::protocol_iterator I = D->protocol_begin(),
- E = D->protocol_end();
- I != E; ++I) {
- if (I + 1 == E)
- lastChild();
- dumpDeclRef(*I);
- }
+
+ ChildDumper Children(*this);
+ Children.dumpRef(D->getImplementation());
+ for (auto *Child : D->protocols())
+ Children.dumpRef(Child);
}
void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) {
@@ -1427,9 +1449,8 @@
}
void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
- for (BlockDecl::param_const_iterator I = D->param_begin(), E = D->param_end();
- I != E; ++I)
- dumpDecl(*I);
+ for (auto I : D->params())
+ dumpDecl(I);
if (D->isVariadic()) {
IndentScope Indent(*this);
@@ -1440,20 +1461,19 @@
IndentScope Indent(*this);
OS << "capture this";
}
- for (BlockDecl::capture_iterator I = D->capture_begin(), E = D->capture_end();
- I != E; ++I) {
+ for (const auto &I : D->captures()) {
IndentScope Indent(*this);
OS << "capture";
- if (I->isByRef())
+ if (I.isByRef())
OS << " byref";
- if (I->isNested())
+ if (I.isNested())
OS << " nested";
- if (I->getVariable()) {
+ if (I.getVariable()) {
OS << ' ';
- dumpBareDeclRef(I->getVariable());
+ dumpBareDeclRef(I.getVariable());
}
- if (I->hasCopyExpr())
- dumpStmt(I->getCopyExpr());
+ if (I.hasCopyExpr())
+ dumpStmt(I.getCopyExpr());
}
lastChild();
dumpStmt(D->getBody());
@@ -1838,7 +1858,8 @@
void ASTDumper::VisitObjCMessageExpr(const ObjCMessageExpr *Node) {
VisitExpr(Node);
- OS << " selector=" << Node->getSelector().getAsString();
+ OS << " selector=";
+ Node->getSelector().print(OS);
switch (Node->getReceiverKind()) {
case ObjCMessageExpr::Instance:
break;
@@ -1860,7 +1881,8 @@
void ASTDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *Node) {
VisitExpr(Node);
- OS << " selector=" << Node->getBoxingMethod()->getSelector().getAsString();
+ OS << " selector=";
+ Node->getBoxingMethod()->getSelector().print(OS);
}
void ASTDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) {
@@ -1879,7 +1901,8 @@
void ASTDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *Node) {
VisitExpr(Node);
- OS << " " << Node->getSelector().getAsString();
+ OS << " ";
+ Node->getSelector().print(OS);
}
void ASTDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *Node) {
@@ -1893,13 +1916,13 @@
if (Node->isImplicitProperty()) {
OS << " Kind=MethodRef Getter=\"";
if (Node->getImplicitPropertyGetter())
- OS << Node->getImplicitPropertyGetter()->getSelector().getAsString();
+ Node->getImplicitPropertyGetter()->getSelector().print(OS);
else
OS << "(null)";
OS << "\" Setter=\"";
if (ObjCMethodDecl *Setter = Node->getImplicitPropertySetter())
- OS << Setter->getSelector().getAsString();
+ Setter->getSelector().print(OS);
else
OS << "(null)";
OS << "\"";
@@ -1926,7 +1949,7 @@
else
OS << " Kind=DictionarySubscript GetterForDictionary=\"";
if (Node->getAtIndexMethodDecl())
- OS << Node->getAtIndexMethodDecl()->getSelector().getAsString();
+ Node->getAtIndexMethodDecl()->getSelector().print(OS);
else
OS << "(null)";
@@ -1935,7 +1958,7 @@
else
OS << "\" SetterForDictionary=\"";
if (Node->setAtIndexMethodDecl())
- OS << Node->setAtIndexMethodDecl()->getSelector().getAsString();
+ Node->setAtIndexMethodDecl()->getSelector().print(OS);
else
OS << "(null)";
}
@@ -2054,7 +2077,7 @@
OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
}
- if (C->isParamIndexValid())
+ if (C->isParamIndexValid() && !C->isVarArgParam())
OS << " ParamIndex=" << C->getParamIndex();
}
@@ -2095,27 +2118,25 @@
// Decl method implementations
//===----------------------------------------------------------------------===//
-void Decl::dump() const {
- dump(llvm::errs());
-}
+LLVM_DUMP_METHOD void Decl::dump() const { dump(llvm::errs()); }
-void Decl::dump(raw_ostream &OS) const {
+LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS) const {
ASTDumper P(OS, &getASTContext().getCommentCommandTraits(),
&getASTContext().getSourceManager());
P.dumpDecl(this);
}
-void Decl::dumpColor() const {
+LLVM_DUMP_METHOD void Decl::dumpColor() const {
ASTDumper P(llvm::errs(), &getASTContext().getCommentCommandTraits(),
&getASTContext().getSourceManager(), /*ShowColors*/true);
P.dumpDecl(this);
}
-void DeclContext::dumpLookups() const {
+LLVM_DUMP_METHOD void DeclContext::dumpLookups() const {
dumpLookups(llvm::errs());
}
-void DeclContext::dumpLookups(raw_ostream &OS) const {
+LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS) const {
const DeclContext *DC = this;
while (!DC->isTranslationUnit())
DC = DC->getParent();
@@ -2128,21 +2149,21 @@
// Stmt method implementations
//===----------------------------------------------------------------------===//
-void Stmt::dump(SourceManager &SM) const {
+LLVM_DUMP_METHOD void Stmt::dump(SourceManager &SM) const {
dump(llvm::errs(), SM);
}
-void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
+LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
ASTDumper P(OS, 0, &SM);
P.dumpStmt(this);
}
-void Stmt::dump() const {
+LLVM_DUMP_METHOD void Stmt::dump() const {
ASTDumper P(llvm::errs(), 0, 0);
P.dumpStmt(this);
}
-void Stmt::dumpColor() const {
+LLVM_DUMP_METHOD void Stmt::dumpColor() const {
ASTDumper P(llvm::errs(), 0, 0, /*ShowColors*/true);
P.dumpStmt(this);
}
@@ -2151,11 +2172,9 @@
// Comment method implementations
//===----------------------------------------------------------------------===//
-void Comment::dump() const {
- dump(llvm::errs(), 0, 0);
-}
+LLVM_DUMP_METHOD void Comment::dump() const { dump(llvm::errs(), 0, 0); }
-void Comment::dump(const ASTContext &Context) const {
+LLVM_DUMP_METHOD void Comment::dump(const ASTContext &Context) const {
dump(llvm::errs(), &Context.getCommentCommandTraits(),
&Context.getSourceManager());
}
@@ -2167,7 +2186,7 @@
D.dumpFullComment(FC);
}
-void Comment::dumpColor() const {
+LLVM_DUMP_METHOD void Comment::dumpColor() const {
const FullComment *FC = dyn_cast<FullComment>(this);
ASTDumper D(llvm::errs(), 0, 0, /*ShowColors*/true);
D.dumpFullComment(FC);
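The ChildDumper helper added to ASTDumper.cpp above replaces the repeated look-one-iterator-ahead loops: each child is held back until the next one arrives (or the dumper is destroyed), so the final child can be flagged via lastChild() before it is emitted. A self-contained sketch of that one-element lookahead, using illustrative names rather than the patch's classes:

#include <cstdio>
#include <initializer_list>
#include <string>

// Holds each item until the next one (or destruction) proves whether it was
// the last; only then is it printed with the matching tree prefix.
class LookaheadPrinter {
  std::string Pending;
  bool HasPending = false;
public:
  ~LookaheadPrinter() { flush(/*IsLast=*/true); }
  void add(const std::string &Item) {
    flush(/*IsLast=*/false);
    Pending = Item;
    HasPending = true;
  }
private:
  void flush(bool IsLast) {
    if (!HasPending)
      return;
    std::printf("%s%s\n", IsLast ? "`-" : "|-", Pending.c_str());
    HasPending = false;
  }
};

int main() {
  LookaheadPrinter P;
  for (const char *Child : {"FieldDecl", "CXXMethodDecl", "FriendDecl"})
    P.add(Child); // "FriendDecl" is printed with the "`-" prefix at scope exit
}
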
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index e16015b..ace526e 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -407,10 +407,11 @@
return false;
break;
+ case Type::Adjusted:
case Type::Decayed:
if (!IsStructurallyEquivalent(Context,
- cast<DecayedType>(T1)->getPointeeType(),
- cast<DecayedType>(T2)->getPointeeType()))
+ cast<AdjustedType>(T1)->getOriginalType(),
+ cast<AdjustedType>(T2)->getOriginalType()))
return false;
break;
@@ -534,12 +535,11 @@
case Type::FunctionProto: {
const FunctionProtoType *Proto1 = cast<FunctionProtoType>(T1);
const FunctionProtoType *Proto2 = cast<FunctionProtoType>(T2);
- if (Proto1->getNumArgs() != Proto2->getNumArgs())
+ if (Proto1->getNumParams() != Proto2->getNumParams())
return false;
- for (unsigned I = 0, N = Proto1->getNumArgs(); I != N; ++I) {
- if (!IsStructurallyEquivalent(Context,
- Proto1->getArgType(I),
- Proto2->getArgType(I)))
+ for (unsigned I = 0, N = Proto1->getNumParams(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context, Proto1->getParamType(I),
+ Proto2->getParamType(I)))
return false;
}
if (Proto1->isVariadic() != Proto2->isVariadic())
@@ -570,9 +570,8 @@
case Type::FunctionNoProto: {
const FunctionType *Function1 = cast<FunctionType>(T1);
const FunctionType *Function2 = cast<FunctionType>(T2);
- if (!IsStructurallyEquivalent(Context,
- Function1->getResultType(),
- Function2->getResultType()))
+ if (!IsStructurallyEquivalent(Context, Function1->getReturnType(),
+ Function2->getReturnType()))
return false;
if (Function1->getExtInfo() != Function2->getExtInfo())
return false;
@@ -931,10 +930,8 @@
return None;
unsigned Index = 0;
- for (DeclContext::decl_iterator D = Owner->noload_decls_begin(),
- DEnd = Owner->noload_decls_end();
- D != DEnd; ++D) {
- FieldDecl *F = dyn_cast<FieldDecl>(*D);
+ for (const auto *D : Owner->noload_decls()) {
+ const auto *F = dyn_cast<FieldDecl>(D);
if (!F || !F->isAnonymousStructOrUnion())
continue;
@@ -1586,7 +1583,7 @@
ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
// FIXME: What happens if we're importing a function without a prototype
// into C++? Should we make it variadic?
- QualType ToResultType = Importer.Import(T->getResultType());
+ QualType ToResultType = Importer.Import(T->getReturnType());
if (ToResultType.isNull())
return QualType();
@@ -1595,16 +1592,14 @@
}
QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
- QualType ToResultType = Importer.Import(T->getResultType());
+ QualType ToResultType = Importer.Import(T->getReturnType());
if (ToResultType.isNull())
return QualType();
// Import argument types
SmallVector<QualType, 4> ArgTypes;
- for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
- AEnd = T->arg_type_end();
- A != AEnd; ++A) {
- QualType ArgType = Importer.Import(*A);
+ for (const auto &A : T->param_types()) {
+ QualType ArgType = Importer.Import(A);
if (ArgType.isNull())
return QualType();
ArgTypes.push_back(ArgType);
@@ -1612,10 +1607,8 @@
// Import exception types
SmallVector<QualType, 4> ExceptionTypes;
- for (FunctionProtoType::exception_iterator E = T->exception_begin(),
- EEnd = T->exception_end();
- E != EEnd; ++E) {
- QualType ExceptionType = Importer.Import(*E);
+ for (const auto &E : T->exceptions()) {
+ QualType ExceptionType = Importer.Import(E);
if (ExceptionType.isNull())
return QualType();
ExceptionTypes.push_back(ExceptionType);
@@ -1631,7 +1624,7 @@
ToEPI.RefQualifier = FromEPI.RefQualifier;
ToEPI.NumExceptions = ExceptionTypes.size();
ToEPI.Exceptions = ExceptionTypes.data();
- ToEPI.ConsumedArguments = FromEPI.ConsumedArguments;
+ ToEPI.ConsumedParameters = FromEPI.ConsumedParameters;
ToEPI.ExceptionSpecType = FromEPI.ExceptionSpecType;
ToEPI.NoexceptExpr = Importer.Import(FromEPI.NoexceptExpr);
ToEPI.ExceptionSpecDecl = cast_or_null<FunctionDecl>(
@@ -1787,11 +1780,9 @@
return QualType();
SmallVector<ObjCProtocolDecl *, 4> Protocols;
- for (ObjCObjectType::qual_iterator P = T->qual_begin(),
- PEnd = T->qual_end();
- P != PEnd; ++P) {
+ for (auto *P : T->quals()) {
ObjCProtocolDecl *Protocol
- = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(*P));
+ = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(P));
if (!Protocol)
return QualType();
Protocols.push_back(Protocol);
@@ -1909,11 +1900,8 @@
return;
}
- for (DeclContext::decl_iterator From = FromDC->decls_begin(),
- FromEnd = FromDC->decls_end();
- From != FromEnd;
- ++From)
- Importer.Import(*From);
+ for (auto *From : FromDC->decls())
+ Importer.Import(From);
}
bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
@@ -1946,6 +1934,7 @@
ToData.HasProtectedFields = FromData.HasProtectedFields;
ToData.HasPublicFields = FromData.HasPublicFields;
ToData.HasMutableFields = FromData.HasMutableFields;
+ ToData.HasVariantMembers = FromData.HasVariantMembers;
ToData.HasOnlyCMembers = FromData.HasOnlyCMembers;
ToData.HasInClassInitializer = FromData.HasInClassInitializer;
ToData.HasUninitializedReferenceMember
@@ -1986,29 +1975,25 @@
ToData.IsLambda = FromData.IsLambda;
SmallVector<CXXBaseSpecifier *, 4> Bases;
- for (CXXRecordDecl::base_class_iterator
- Base1 = FromCXX->bases_begin(),
- FromBaseEnd = FromCXX->bases_end();
- Base1 != FromBaseEnd;
- ++Base1) {
- QualType T = Importer.Import(Base1->getType());
+ for (const auto &Base1 : FromCXX->bases()) {
+ QualType T = Importer.Import(Base1.getType());
if (T.isNull())
return true;
SourceLocation EllipsisLoc;
- if (Base1->isPackExpansion())
- EllipsisLoc = Importer.Import(Base1->getEllipsisLoc());
+ if (Base1.isPackExpansion())
+ EllipsisLoc = Importer.Import(Base1.getEllipsisLoc());
// Ensure that we have a definition for the base.
- ImportDefinitionIfNeeded(Base1->getType()->getAsCXXRecordDecl());
+ ImportDefinitionIfNeeded(Base1.getType()->getAsCXXRecordDecl());
Bases.push_back(
new (Importer.getToContext())
- CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()),
- Base1->isVirtual(),
- Base1->isBaseOfClass(),
- Base1->getAccessSpecifierAsWritten(),
- Importer.Import(Base1->getTypeSourceInfo()),
+ CXXBaseSpecifier(Importer.Import(Base1.getSourceRange()),
+ Base1.isVirtual(),
+ Base1.isBaseOfClass(),
+ Base1.getAccessSpecifierAsWritten(),
+ Importer.Import(Base1.getTypeSourceInfo()),
EllipsisLoc));
}
if (!Bases.empty())
@@ -2538,6 +2523,21 @@
} else if (!D->isCompleteDefinition()) {
// We have a forward declaration of this type, so adopt that forward
// declaration rather than building a new one.
+
+ // If one or both can be completed from external storage then try one
+ // last time to complete and compare them before doing this.
+
+ if (FoundRecord->hasExternalLexicalStorage() &&
+ !FoundRecord->isCompleteDefinition())
+ FoundRecord->getASTContext().getExternalSource()->CompleteType(FoundRecord);
+ if (D->hasExternalLexicalStorage())
+ D->getASTContext().getExternalSource()->CompleteType(D);
+
+ if (FoundRecord->isCompleteDefinition() &&
+ D->isCompleteDefinition() &&
+ !IsStructuralMatch(D, FoundRecord))
+ continue;
+
AdoptDecl = FoundRecord;
continue;
} else if (!SearchName) {
@@ -2716,7 +2716,7 @@
FromEPI.NoexceptExpr) {
FunctionProtoType::ExtProtoInfo DefaultEPI;
FromTy = Importer.getFromContext().getFunctionType(
- FromFPT->getResultType(), FromFPT->getArgTypes(), DefaultEPI);
+ FromFPT->getReturnType(), FromFPT->getParamTypes(), DefaultEPI);
usedDifferentExceptionSpec = true;
}
}
@@ -2728,9 +2728,8 @@
// Import the function parameters.
SmallVector<ParmVarDecl *, 8> Parameters;
- for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
- P != PEnd; ++P) {
- ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*P));
+ for (auto P : D->params()) {
+ ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(P));
if (!ToP)
return 0;
@@ -2838,10 +2837,8 @@
return 0;
unsigned Index = 1;
- for (DeclContext::decl_iterator D = Owner->noload_decls_begin(),
- DEnd = Owner->noload_decls_end();
- D != DEnd; ++D) {
- if (*D == F)
+ for (const auto *D : Owner->noload_decls()) {
+ if (D == F)
return Index;
if (isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D))
@@ -2953,9 +2950,8 @@
new (Importer.getToContext())NamedDecl*[D->getChainingSize()];
unsigned i = 0;
- for (IndirectFieldDecl::chain_iterator PI = D->chain_begin(),
- PE = D->chain_end(); PI != PE; ++PI) {
- Decl* D = Importer.Import(*PI);
+ for (auto *PI : D->chain()) {
+ Decl *D = Importer.Import(PI);
if (!D)
return 0;
NamedChain[i++] = cast<NamedDecl>(D);
@@ -3014,8 +3010,7 @@
Importer.Import(D->getInnerLocStart()),
Loc, Name.getAsIdentifierInfo(),
T, TInfo, D->getAccessControl(),
- BitWidth, D->getSynthesize(),
- D->getBackingIvarReferencedInAccessor());
+ BitWidth, D->getSynthesize());
ToIvar->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToIvar);
LexicalDC->addDeclInternal(ToIvar);
@@ -3215,11 +3210,11 @@
continue;
// Check return types.
- if (!Importer.IsStructurallyEquivalent(D->getResultType(),
- FoundMethod->getResultType())) {
+ if (!Importer.IsStructurallyEquivalent(D->getReturnType(),
+ FoundMethod->getReturnType())) {
Importer.ToDiag(Loc, diag::err_odr_objc_method_result_type_inconsistent)
- << D->isInstanceMethod() << Name
- << D->getResultType() << FoundMethod->getResultType();
+ << D->isInstanceMethod() << Name << D->getReturnType()
+ << FoundMethod->getReturnType();
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
@@ -3270,36 +3265,25 @@
}
// Import the result type.
- QualType ResultTy = Importer.Import(D->getResultType());
+ QualType ResultTy = Importer.Import(D->getReturnType());
if (ResultTy.isNull())
return 0;
- TypeSourceInfo *ResultTInfo = Importer.Import(D->getResultTypeSourceInfo());
+ TypeSourceInfo *ReturnTInfo = Importer.Import(D->getReturnTypeSourceInfo());
- ObjCMethodDecl *ToMethod
- = ObjCMethodDecl::Create(Importer.getToContext(),
- Loc,
- Importer.Import(D->getLocEnd()),
- Name.getObjCSelector(),
- ResultTy, ResultTInfo, DC,
- D->isInstanceMethod(),
- D->isVariadic(),
- D->isPropertyAccessor(),
- D->isImplicit(),
- D->isDefined(),
- D->getImplementationControl(),
- D->hasRelatedResultType());
+ ObjCMethodDecl *ToMethod = ObjCMethodDecl::Create(
+ Importer.getToContext(), Loc, Importer.Import(D->getLocEnd()),
+ Name.getObjCSelector(), ResultTy, ReturnTInfo, DC, D->isInstanceMethod(),
+ D->isVariadic(), D->isPropertyAccessor(), D->isImplicit(), D->isDefined(),
+ D->getImplementationControl(), D->hasRelatedResultType());
// FIXME: When we decide to merge method definitions, we'll need to
// deal with implicit parameters.
// Import the parameters
SmallVector<ParmVarDecl *, 5> ToParams;
- for (ObjCMethodDecl::param_iterator FromP = D->param_begin(),
- FromPEnd = D->param_end();
- FromP != FromPEnd;
- ++FromP) {
- ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*FromP));
+ for (auto *FromP : D->params()) {
+ ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(FromP));
if (!ToP)
return 0;
@@ -3558,12 +3542,8 @@
// Import categories. When the categories themselves are imported, they'll
// hook themselves into this interface.
- for (ObjCInterfaceDecl::known_categories_iterator
- Cat = From->known_categories_begin(),
- CatEnd = From->known_categories_end();
- Cat != CatEnd; ++Cat) {
- Importer.Import(*Cat);
- }
+ for (auto *Cat : From->known_categories())
+ Importer.Import(Cat);
// If we have an @implementation, import it as well.
if (From->getImplementation()) {
@@ -4240,8 +4220,7 @@
return 0;
VarTemplateDecl *D2 = VarTemplateDecl::Create(
- Importer.getToContext(), DC, Loc, Name, TemplateParams, D2Templated,
- /*PrevDecl=*/0);
+ Importer.getToContext(), DC, Loc, Name, TemplateParams, D2Templated);
D2Templated->setDescribedVarTemplate(D2);
D2->setAccess(D->getAccess());
diff --git a/lib/AST/ASTTypeTraits.cpp b/lib/AST/ASTTypeTraits.cpp
index ae47ea9..02d82d8 100644
--- a/lib/AST/ASTTypeTraits.cpp
+++ b/lib/AST/ASTTypeTraits.cpp
@@ -39,18 +39,24 @@
#include "clang/AST/TypeNodes.def"
};
-bool ASTNodeKind::isBaseOf(ASTNodeKind Other) const {
- return isBaseOf(KindId, Other.KindId);
+bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
+ return isBaseOf(KindId, Other.KindId, Distance);
}
bool ASTNodeKind::isSame(ASTNodeKind Other) const {
return KindId != NKI_None && KindId == Other.KindId;
}
-bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived) {
+bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived,
+ unsigned *Distance) {
if (Base == NKI_None || Derived == NKI_None) return false;
- while (Derived != Base && Derived != NKI_None)
+ unsigned Dist = 0;
+ while (Derived != Base && Derived != NKI_None) {
Derived = AllKindInfo[Derived].ParentId;
+ ++Dist;
+ }
+ if (Distance)
+ *Distance = Dist;
return Derived == Base;
}
diff --git a/lib/AST/Android.mk b/lib/AST/Android.mk
index 25eb48e..9d0c005 100644
--- a/lib/AST/Android.mk
+++ b/lib/AST/Android.mk
@@ -10,6 +10,7 @@
AttrImpl.inc \
AttrList.inc \
Attrs.inc \
+ AttrVisitor.inc \
CommentCommandInfo.inc \
CommentCommandList.inc \
CommentHTMLNamedCharacterReferences.inc \
diff --git a/lib/AST/AttrImpl.cpp b/lib/AST/AttrImpl.cpp
index 7af3c8b..0bf6bcd 100644
--- a/lib/AST/AttrImpl.cpp
+++ b/lib/AST/AttrImpl.cpp
@@ -24,6 +24,4 @@
void InheritableParamAttr::anchor() { }
-void MSInheritanceAttr::anchor() { }
-
#include "clang/AST/AttrImpl.inc"
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index 461e8b3..9006be6 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -57,29 +57,8 @@
TypePrinter.cpp
VTableBuilder.cpp
VTTBuilder.cpp
- )
-add_dependencies(clangAST
- ClangARMNeon
- ClangAttrClasses
- ClangAttrList
- ClangAttrImpl
- ClangAttrDump
- ClangCommentCommandInfo
- ClangCommentCommandList
- ClangCommentNodes
- ClangCommentHTMLTags
- ClangCommentHTMLTagsProperties
- ClangCommentHTMLNamedCharacterReferences
- ClangDeclNodes
- ClangDiagnosticAST
- ClangDiagnosticComment
- ClangDiagnosticCommon
- ClangDiagnosticSema
- ClangStmtNodes
- )
-
-target_link_libraries(clangAST
+ LINK_LIBS
clangBasic
clangLex
)
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index b51014b..eef043c 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -35,16 +35,12 @@
std::copy(Decls.begin(), Decls.end(), DeclsFound);
}
-CXXBasePaths::decl_iterator CXXBasePaths::found_decls_begin() {
+CXXBasePaths::decl_range CXXBasePaths::found_decls() {
if (NumDeclsFound == 0)
ComputeDeclsFound();
- return DeclsFound;
-}
-CXXBasePaths::decl_iterator CXXBasePaths::found_decls_end() {
- if (NumDeclsFound == 0)
- ComputeDeclsFound();
- return DeclsFound + NumDeclsFound;
+ return decl_range(decl_iterator(DeclsFound),
+ decl_iterator(DeclsFound + NumDeclsFound));
}
/// isAmbiguous - Determines whether the set of paths provided is
@@ -141,9 +137,8 @@
const CXXRecordDecl *Record = this;
bool AllMatches = true;
while (true) {
- for (CXXRecordDecl::base_class_const_iterator
- I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
- const RecordType *Ty = I->getType()->getAs<RecordType>();
+ for (const auto &I : Record->bases()) {
+ const RecordType *Ty = I.getType()->getAs<RecordType>();
if (!Ty) {
if (AllowShortCircuit) return false;
AllMatches = false;
@@ -186,14 +181,11 @@
AccessSpecifier AccessToHere = ScratchPath.Access;
bool IsFirstStep = ScratchPath.empty();
- for (CXXRecordDecl::base_class_const_iterator BaseSpec = Record->bases_begin(),
- BaseSpecEnd = Record->bases_end();
- BaseSpec != BaseSpecEnd;
- ++BaseSpec) {
+ for (const auto &BaseSpec : Record->bases()) {
// Find the record of the base class subobjects for this type.
- QualType BaseType = Context.getCanonicalType(BaseSpec->getType())
- .getUnqualifiedType();
-
+ QualType BaseType =
+ Context.getCanonicalType(BaseSpec.getType()).getUnqualifiedType();
+
// C++ [temp.dep]p3:
// In the definition of a class template or a member of a class template,
// if a base class of the class template depends on a template-parameter,
@@ -208,7 +200,7 @@
std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
bool VisitBase = true;
bool SetVirtual = false;
- if (BaseSpec->isVirtual()) {
+ if (BaseSpec.isVirtual()) {
VisitBase = !Subobjects.first;
Subobjects.first = true;
if (isDetectingVirtual() && DetectedVirtual == 0) {
@@ -223,9 +215,9 @@
if (isRecordingPaths()) {
// Add this base specifier to the current path.
CXXBasePathElement Element;
- Element.Base = &*BaseSpec;
+ Element.Base = &BaseSpec;
Element.Class = Record;
- if (BaseSpec->isVirtual())
+ if (BaseSpec.isVirtual())
Element.SubobjectNumber = 0;
else
Element.SubobjectNumber = Subobjects.second;
@@ -247,16 +239,16 @@
// 3. Otherwise, overall access is determined by the most restrictive
// access in the sequence.
if (IsFirstStep)
- ScratchPath.Access = BaseSpec->getAccessSpecifier();
+ ScratchPath.Access = BaseSpec.getAccessSpecifier();
else
ScratchPath.Access = CXXRecordDecl::MergeAccess(AccessToHere,
- BaseSpec->getAccessSpecifier());
+ BaseSpec.getAccessSpecifier());
}
// Track whether there's a path involving this specific base.
bool FoundPathThroughBase = false;
- if (BaseMatches(BaseSpec, ScratchPath, UserData)) {
+ if (BaseMatches(&BaseSpec, ScratchPath, UserData)) {
// We've found a path that terminates at this base.
FoundPath = FoundPathThroughBase = true;
if (isRecordingPaths()) {
@@ -269,7 +261,7 @@
}
} else if (VisitBase) {
CXXRecordDecl *BaseRecord
- = cast<CXXRecordDecl>(BaseSpec->getType()->castAs<RecordType>()
+ = cast<CXXRecordDecl>(BaseSpec.getType()->castAs<RecordType>()
->getDecl());
if (lookupInBases(Context, BaseRecord, BaseMatches, UserData)) {
// C++ [class.member.lookup]p2:
@@ -501,14 +493,13 @@
SubobjectNumber
= ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())];
- for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(),
- BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base) {
- if (const RecordType *RT = Base->getType()->getAs<RecordType>()) {
+ for (const auto &Base : RD->bases()) {
+ if (const RecordType *RT = Base.getType()->getAs<RecordType>()) {
const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
if (!BaseDecl->isPolymorphic())
continue;
- if (Overriders.empty() && !Base->isVirtual()) {
+ if (Overriders.empty() && !Base.isVirtual()) {
// There are no other overriders of virtual member functions,
// so let the base class fill in our overriders for us.
Collect(BaseDecl, false, InVirtualSubobject, Overriders);
@@ -522,7 +513,7 @@
// its base classes) more than once.
CXXFinalOverriderMap ComputedBaseOverriders;
CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders;
- if (Base->isVirtual()) {
+ if (Base.isVirtual()) {
CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl];
BaseOverriders = MyVirtualOverriders;
if (!MyVirtualOverriders) {
@@ -551,10 +542,7 @@
}
}
- for (CXXRecordDecl::method_iterator M = RD->method_begin(),
- MEnd = RD->method_end();
- M != MEnd;
- ++M) {
+ for (auto *M : RD->methods()) {
// We only care about virtual methods.
if (!M->isVirtual())
continue;
@@ -702,13 +690,12 @@
if (Layout.isPrimaryBaseVirtual())
Bases.insert(Layout.getPrimaryBase());
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
+ for (const auto &I : RD->bases()) {
+ assert(!I.getType()->isDependentType() &&
"Cannot get indirect primary bases for class with dependent bases.");
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
// Only bases with virtual bases participate in computing the
// indirect primary virtual base classes.
@@ -725,13 +712,12 @@
if (!getNumVBases())
return;
- for (CXXRecordDecl::base_class_const_iterator I = bases_begin(),
- E = bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
+ for (const auto &I : bases()) {
+ assert(!I.getType()->isDependentType() &&
"Cannot get indirect primary bases for class with dependent bases.");
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
// Only bases with virtual bases participate in computing the
// indirect primary virtual base classes.
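With CXXBasePaths::found_decls() now returning a decl_range instead of a begin/end pair (see the CXXInheritance.cpp hunk above), callers can iterate with a range-based for loop directly. A hedged usage sketch, assuming a CXXBasePaths object already populated by a lookupInBases() call; the helper name is illustrative:

#include "clang/AST/CXXInheritance.h"

// Illustrative only: count the declarations found across all base paths via
// the new range accessor rather than the removed found_decls_begin()/end().
static unsigned countFoundDecls(clang::CXXBasePaths &Paths) {
  unsigned N = 0;
  for (clang::NamedDecl *ND : Paths.found_decls()) {
    (void)ND;
    ++N;
  }
  return N;
}
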
diff --git a/lib/AST/Comment.cpp b/lib/AST/Comment.cpp
index f24a23d..b0b2351 100644
--- a/lib/AST/Comment.cpp
+++ b/lib/AST/Comment.cpp
@@ -159,7 +159,7 @@
Kind = FunctionKind;
ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(),
FD->getNumParams());
- ResultType = FD->getResultType();
+ ReturnType = FD->getReturnType();
unsigned NumLists = FD->getNumTemplateParameterLists();
if (NumLists != 0) {
TemplateKind = TemplateSpecialization;
@@ -180,7 +180,7 @@
Kind = FunctionKind;
ParamVars = ArrayRef<const ParmVarDecl *>(MD->param_begin(),
MD->param_size());
- ResultType = MD->getResultType();
+ ReturnType = MD->getReturnType();
IsObjCMethod = true;
IsInstanceMethod = MD->isInstanceMethod();
IsClassMethod = !IsInstanceMethod;
@@ -193,7 +193,7 @@
const FunctionDecl *FD = FTD->getTemplatedDecl();
ParamVars = ArrayRef<const ParmVarDecl *>(FD->param_begin(),
FD->getNumParams());
- ResultType = FD->getResultType();
+ ReturnType = FD->getReturnType();
TemplateParameters = FTD->getTemplateParameters();
break;
}
@@ -251,6 +251,16 @@
TL = PointerTL.getPointeeLoc().getUnqualifiedLoc();
continue;
}
+ // Look through reference types.
+ if (ReferenceTypeLoc ReferenceTL = TL.getAs<ReferenceTypeLoc>()) {
+ TL = ReferenceTL.getPointeeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ // Look through adjusted types.
+ if (AdjustedTypeLoc ATL = TL.getAs<AdjustedTypeLoc>()) {
+ TL = ATL.getOriginalLoc();
+ continue;
+ }
if (BlockPointerTypeLoc BlockPointerTL =
TL.getAs<BlockPointerTypeLoc>()) {
TL = BlockPointerTL.getPointeeLoc().getUnqualifiedLoc();
@@ -261,13 +271,39 @@
TL = MemberPointerTL.getPointeeLoc().getUnqualifiedLoc();
continue;
}
+ if (ElaboratedTypeLoc ETL = TL.getAs<ElaboratedTypeLoc>()) {
+ TL = ETL.getNamedTypeLoc();
+ continue;
+ }
// Is this a typedef for a function type?
if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
Kind = FunctionKind;
ArrayRef<ParmVarDecl *> Params = FTL.getParams();
ParamVars = ArrayRef<const ParmVarDecl *>(Params.data(),
Params.size());
- ResultType = FTL.getResultLoc().getType();
+ ReturnType = FTL.getReturnLoc().getType();
+ break;
+ }
+ if (TemplateSpecializationTypeLoc STL =
+ TL.getAs<TemplateSpecializationTypeLoc>()) {
+ // If we have a typedef to a template specialization with exactly one
+ // template argument of a function type, this looks like std::function,
+ // boost::function, or other function wrapper. Treat these typedefs as
+ // functions.
+ if (STL.getNumArgs() != 1)
+ break;
+ TemplateArgumentLoc MaybeFunction = STL.getArgLoc(0);
+ if (MaybeFunction.getArgument().getKind() != TemplateArgument::Type)
+ break;
+ TypeSourceInfo *MaybeFunctionTSI = MaybeFunction.getTypeSourceInfo();
+ TypeLoc TL = MaybeFunctionTSI->getTypeLoc().getUnqualifiedLoc();
+ if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
+ Kind = FunctionKind;
+ ArrayRef<ParmVarDecl *> Params = FTL.getParams();
+ ParamVars = ArrayRef<const ParmVarDecl *>(Params.data(),
+ Params.size());
+ ReturnType = FTL.getReturnLoc().getType();
+ }
break;
}
break;
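The TemplateSpecializationTypeLoc case added above means a typedef of a single-function-type-argument wrapper is documented as if it were the wrapped function. A hypothetical declaration the change is intended to recognize (names invented for illustration); with the patch, a \returns command here should be checked against the wrapped int return type instead of being reported as not attached to a function:

#include <functional>

/// Runs one frobnication pass over the input.
/// \returns the number of items frobnicated.
typedef std::function<int(bool)> FrobnicatePass;
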
diff --git a/lib/AST/CommentCommandTraits.cpp b/lib/AST/CommentCommandTraits.cpp
index 01bd12e..8879aac 100644
--- a/lib/AST/CommentCommandTraits.cpp
+++ b/lib/AST/CommentCommandTraits.cpp
@@ -43,47 +43,42 @@
return getRegisteredCommandInfo(CommandID);
}
-static void
-HelperTypoCorrectCommandInfo(SmallVectorImpl<const CommandInfo *> &BestCommand,
- StringRef Typo, const CommandInfo *Command) {
- const unsigned MaxEditDistance = 1;
- unsigned BestEditDistance = MaxEditDistance + 1;
- StringRef Name = Command->Name;
-
- unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size());
- if (MinPossibleEditDistance > 0 &&
- Typo.size() / MinPossibleEditDistance < 1)
- return;
- unsigned EditDistance = Typo.edit_distance(Name, true, MaxEditDistance);
- if (EditDistance > MaxEditDistance)
- return;
- if (EditDistance == BestEditDistance)
- BestCommand.push_back(Command);
- else if (EditDistance < BestEditDistance) {
- BestCommand.clear();
- BestCommand.push_back(Command);
- BestEditDistance = EditDistance;
- }
-}
-
const CommandInfo *
CommandTraits::getTypoCorrectCommandInfo(StringRef Typo) const {
- // single character command impostures, such as \t or \n must not go
+ // Single-character command impostures, such as \t or \n, should not go
// through the fixit logic.
if (Typo.size() <= 1)
- return NULL;
-
+ return nullptr;
+
+ // The maximum edit distance we're prepared to accept.
+ const unsigned MaxEditDistance = 1;
+
+ unsigned BestEditDistance = MaxEditDistance;
SmallVector<const CommandInfo *, 2> BestCommand;
-
- const int NumOfCommands = llvm::array_lengthof(Commands);
- for (int i = 0; i < NumOfCommands; i++)
- HelperTypoCorrectCommandInfo(BestCommand, Typo, &Commands[i]);
-
- for (unsigned i = 0, e = RegisteredCommands.size(); i != e; ++i)
- if (!RegisteredCommands[i]->IsUnknownCommand)
- HelperTypoCorrectCommandInfo(BestCommand, Typo, RegisteredCommands[i]);
-
- return (BestCommand.size() != 1) ? NULL : BestCommand[0];
+
+ auto ConsiderCorrection = [&](const CommandInfo *Command) {
+ StringRef Name = Command->Name;
+
+ unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size());
+ if (MinPossibleEditDistance <= BestEditDistance) {
+ unsigned EditDistance = Typo.edit_distance(Name, true, BestEditDistance);
+ if (EditDistance < BestEditDistance) {
+ BestEditDistance = EditDistance;
+ BestCommand.clear();
+ }
+ if (EditDistance == BestEditDistance)
+ BestCommand.push_back(Command);
+ }
+ };
+
+ for (const auto &Command : Commands)
+ ConsiderCorrection(&Command);
+
+ for (const auto *Command : RegisteredCommands)
+ if (!Command->IsUnknownCommand)
+ ConsiderCorrection(Command);
+
+ return BestCommand.size() == 1 ? BestCommand[0] : nullptr;
}
CommandInfo *CommandTraits::createCommandInfoWithName(StringRef CommandName) {
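The rewritten getTypoCorrectCommandInfo above folds the old helper into a lambda and tightens the search as it goes: edit_distance() is capped by the best distance seen so far, strictly better candidates reset the list, and ties accumulate so that an ambiguous correction (more than one best match) is rejected. A stripped-down sketch of the same selection loop over plain strings, with illustrative names and no clang dependencies beyond LLVM's ADT headers:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <cstdlib>

// Returns the unique candidate within edit distance 1 of Typo, or an empty
// StringRef when there is no such candidate or the best distance is tied.
static llvm::StringRef correctTypo(llvm::StringRef Typo,
                                   llvm::ArrayRef<llvm::StringRef> Candidates) {
  unsigned Best = 1; // the maximum edit distance we are willing to accept
  llvm::SmallVector<llvm::StringRef, 2> BestMatches;
  for (llvm::StringRef Name : Candidates) {
    unsigned MinPossible = std::abs((int)Name.size() - (int)Typo.size());
    if (MinPossible > Best)
      continue; // cannot match the current bound, skip the expensive check
    unsigned Dist = Typo.edit_distance(Name, /*AllowReplacements=*/true, Best);
    if (Dist > Best)
      continue;
    if (Dist < Best) {
      Best = Dist;
      BestMatches.clear();
    }
    BestMatches.push_back(Name);
  }
  return BestMatches.size() == 1 ? BestMatches[0] : llvm::StringRef();
}
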
diff --git a/lib/AST/CommentLexer.cpp b/lib/AST/CommentLexer.cpp
index 01ed3ce..792a832 100644
--- a/lib/AST/CommentLexer.cpp
+++ b/lib/AST/CommentLexer.cpp
@@ -268,6 +268,19 @@
} // unnamed namespace
+void Lexer::formTokenWithChars(Token &Result, const char *TokEnd,
+ tok::TokenKind Kind) {
+ const unsigned TokLen = TokEnd - BufferPtr;
+ Result.setLocation(getSourceLocation(BufferPtr));
+ Result.setKind(Kind);
+ Result.setLength(TokLen);
+#ifndef NDEBUG
+ Result.TextPtr = "<UNSET>";
+ Result.IntVal = 7;
+#endif
+ BufferPtr = TokEnd;
+}
+
void Lexer::lexCommentText(Token &T) {
assert(CommentState == LCS_InsideBCPLComment ||
CommentState == LCS_InsideCComment);
diff --git a/lib/AST/CommentSema.cpp b/lib/AST/CommentSema.cpp
index 1c6222f..e51f878 100644
--- a/lib/AST/CommentSema.cpp
+++ b/lib/AST/CommentSema.cpp
@@ -68,8 +68,12 @@
Command->setParagraph(Paragraph);
checkBlockCommandEmptyParagraph(Command);
checkBlockCommandDuplicate(Command);
- checkReturnsCommand(Command);
- checkDeprecatedCommand(Command);
+ if (ThisDeclInfo) {
+ // These checks only make sense if the comment is attached to a
+ // declaration.
+ checkReturnsCommand(Command);
+ checkDeprecatedCommand(Command);
+ }
}
ParamCommandComment *Sema::actOnParamCommandStart(
@@ -122,7 +126,7 @@
<< (DiagSelect-1) << (DiagSelect-1)
<< Comment->getSourceRange();
}
-
+
void Sema::checkContainerDeclVerbatimLine(const BlockCommandComment *Comment) {
const CommandInfo *Info = Traits.getCommandInfo(Comment->getCommandID());
if (!Info->IsRecordLikeDeclarationCommand)
@@ -558,8 +562,11 @@
void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
if (!Traits.getCommandInfo(Command->getCommandID())->IsReturnsCommand)
return;
+
+ assert(ThisDeclInfo && "should not call this check on a bare comment");
+
if (isFunctionDecl()) {
- if (ThisDeclInfo->ResultType->isVoidType()) {
+ if (ThisDeclInfo->ReturnType->isVoidType()) {
unsigned DiagKind;
switch (ThisDeclInfo->CommentDecl->getKind()) {
default:
@@ -586,7 +593,7 @@
}
else if (isObjCPropertyDecl())
return;
-
+
Diag(Command->getLocation(),
diag::warn_doc_returns_not_attached_to_a_function_decl)
<< Command->getCommandMarker()
@@ -636,6 +643,8 @@
if (!Traits.getCommandInfo(Command->getCommandID())->IsDeprecatedCommand)
return;
+ assert(ThisDeclInfo && "should not call this check on a bare comment");
+
const Decl *D = ThisDeclInfo->CommentDecl;
if (!D)
return;
@@ -783,11 +792,14 @@
}
bool Sema::isFunctionOrMethodVariadic() {
- if (!isAnyFunctionDecl() && !isObjCMethodDecl())
+ if (!isAnyFunctionDecl() && !isObjCMethodDecl() && !isFunctionTemplateDecl())
return false;
if (const FunctionDecl *FD =
dyn_cast<FunctionDecl>(ThisDeclInfo->CurrentDecl))
return FD->isVariadic();
+ if (const FunctionTemplateDecl *FTD =
+ dyn_cast<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl))
+ return FTD->getTemplatedDecl()->isVariadic();
if (const ObjCMethodDecl *MD =
dyn_cast<ObjCMethodDecl>(ThisDeclInfo->CurrentDecl))
return MD->isVariadic();
@@ -812,7 +824,7 @@
}
return false;
}
-
+
bool Sema::isObjCPropertyDecl() {
if (!ThisDeclInfo)
return false;
@@ -834,8 +846,8 @@
return false;
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
- return isUnionDecl() || isClassOrStructDecl()
- || isObjCInterfaceDecl() || isObjCProtocolDecl();
+ return isUnionDecl() || isClassOrStructDecl() || isObjCInterfaceDecl() ||
+ isObjCProtocolDecl();
}
bool Sema::isUnionDecl() {
@@ -848,7 +860,7 @@
return RD->isUnion();
return false;
}
-
+
bool Sema::isClassOrStructDecl() {
if (!ThisDeclInfo)
return false;
@@ -858,7 +870,7 @@
isa<RecordDecl>(ThisDeclInfo->CurrentDecl) &&
!isUnionDecl();
}
-
+
bool Sema::isClassTemplateDecl() {
if (!ThisDeclInfo)
return false;
@@ -874,7 +886,7 @@
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
return ThisDeclInfo->CurrentDecl &&
- (isa<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl));
+ (isa<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl));
}
bool Sema::isObjCInterfaceDecl() {
@@ -885,7 +897,7 @@
return ThisDeclInfo->CurrentDecl &&
isa<ObjCInterfaceDecl>(ThisDeclInfo->CurrentDecl);
}
-
+
bool Sema::isObjCProtocolDecl() {
if (!ThisDeclInfo)
return false;
@@ -894,7 +906,7 @@
return ThisDeclInfo->CurrentDecl &&
isa<ObjCProtocolDecl>(ThisDeclInfo->CurrentDecl);
}
-
+
ArrayRef<const ParmVarDecl *> Sema::getParamVars() {
if (!ThisDeclInfo->IsFilled)
inspectThisDecl();
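The isFunctionOrMethodVariadic change above now also recognizes variadic function templates. A minimal example of the kind of declaration that benefits (the names are made up for illustration); the trailing \param ... is meaningful because the templated function is C-variadic:

/// Formats and logs a message.
/// \param Severity the log level to report at
/// \param Fmt      a printf-style format string
/// \param ...      the values substituted into \p Fmt
template <typename FormatT>
void logFormatted(int Severity, const FormatT &Fmt, ...);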
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 6bd9858..15410f0 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -13,6 +13,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
@@ -29,7 +30,6 @@
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/type_traits.h"
#include <algorithm>
using namespace clang;
@@ -158,8 +158,7 @@
/// Does the given declaration have member specialization information,
/// and if so, is it an explicit specialization?
template <class T> static typename
-llvm::enable_if_c<!llvm::is_base_of<RedeclarableTemplateDecl, T>::value,
- bool>::type
+std::enable_if<!std::is_base_of<RedeclarableTemplateDecl, T>::value, bool>::type
isExplicitMemberSpecialization(const T *D) {
if (const MemberSpecializationInfo *member =
D->getMemberSpecializationInfo()) {
@@ -209,11 +208,8 @@
// If we're on Mac OS X, an 'availability' for Mac OS X attribute
// implies visibility(default).
if (D->getASTContext().getTargetInfo().getTriple().isOSDarwin()) {
- for (specific_attr_iterator<AvailabilityAttr>
- A = D->specific_attr_begin<AvailabilityAttr>(),
- AEnd = D->specific_attr_end<AvailabilityAttr>();
- A != AEnd; ++A)
- if ((*A)->getPlatform()->getName().equals("macosx"))
+ for (const auto *A : D->specific_attrs<AvailabilityAttr>())
+ if (A->getPlatform()->getName().equals("macosx"))
return DefaultVisibility;
}
@@ -512,9 +508,9 @@
return First->isInExternCContext();
}
-static bool isSingleLineExternC(const Decl &D) {
+static bool isSingleLineLanguageLinkage(const Decl &D) {
if (const LinkageSpecDecl *SD = dyn_cast<LinkageSpecDecl>(D.getDeclContext()))
- if (SD->getLanguage() == LinkageSpecDecl::lang_c && !SD->hasBraces())
+ if (!SD->hasBraces())
return true;
return false;
}
@@ -548,7 +544,7 @@
if (Var->getStorageClass() != SC_Extern &&
Var->getStorageClass() != SC_PrivateExtern &&
- !isSingleLineExternC(*Var))
+ !isSingleLineLanguageLinkage(*Var))
return LinkageInfo::internal();
}
@@ -561,16 +557,10 @@
if (PrevVar->getStorageClass() == SC_Static)
return LinkageInfo::internal();
}
- } else if (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)) {
+ } else if (const FunctionDecl *Function = D->getAsFunction()) {
// C++ [temp]p4:
// A non-member function template can have internal linkage; any
// other template name shall have external linkage.
- const FunctionDecl *Function = 0;
- if (const FunctionTemplateDecl *FunTmpl
- = dyn_cast<FunctionTemplateDecl>(D))
- Function = FunTmpl->getTemplatedDecl();
- else
- Function = cast<FunctionDecl>(D);
// Explicitly declared static.
if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
@@ -782,11 +772,18 @@
// really have linkage, but it's convenient to say they do for the
// purposes of calculating linkage of pointer-to-data-member
// template arguments.
+ //
+ // Templates also don't officially have linkage, but since we ignore
+ // the C++ standard and look at template arguments when determining
+ // linkage and visibility of a template specialization, we might hit
+ // a template template argument that way. If we do, we need to
+ // consider its linkage.
if (!(isa<CXXMethodDecl>(D) ||
isa<VarDecl>(D) ||
isa<FieldDecl>(D) ||
isa<IndirectFieldDecl>(D) ||
- isa<TagDecl>(D)))
+ isa<TagDecl>(D) ||
+ isa<TemplateDecl>(D)))
return LinkageInfo::none();
LinkageInfo LV;
@@ -1242,16 +1239,13 @@
// We have just computed the linkage for this decl. By induction we know
// that all other computed linkages match, check that the one we just
- // computed
- // also does.
+ // computed also does.
NamedDecl *Old = NULL;
- for (NamedDecl::redecl_iterator I = D->redecls_begin(),
- E = D->redecls_end();
- I != E; ++I) {
- NamedDecl *T = cast<NamedDecl>(*I);
+ for (auto I : D->redecls()) {
+ NamedDecl *T = cast<NamedDecl>(I);
if (T == D)
continue;
- if (T->hasCachedLinkage()) {
+ if (!T->isInvalidDecl() && T->hasCachedLinkage()) {
Old = T;
break;
}
@@ -1270,13 +1264,9 @@
}
std::string NamedDecl::getQualifiedNameAsString() const {
- return getQualifiedNameAsString(getASTContext().getPrintingPolicy());
-}
-
-std::string NamedDecl::getQualifiedNameAsString(const PrintingPolicy &P) const {
std::string QualName;
llvm::raw_string_ostream OS(QualName);
- printQualifiedName(OS, P);
+ printQualifiedName(OS, getASTContext().getPrintingPolicy());
return OS.str();
}
@@ -1314,12 +1304,12 @@
P);
} else if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(*I)) {
if (ND->isAnonymousNamespace())
- OS << "<anonymous namespace>";
+ OS << "(anonymous namespace)";
else
OS << *ND;
} else if (const RecordDecl *RD = dyn_cast<RecordDecl>(*I)) {
if (!RD->getIdentifier())
- OS << "<anonymous " << RD->getKindName() << '>';
+ OS << "(anonymous " << RD->getKindName() << ')';
else
OS << *RD;
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
@@ -1352,7 +1342,7 @@
if (getDeclName())
OS << *this;
else
- OS << "<anonymous>";
+ OS << "(anonymous)";
}
void NamedDecl::getNameForDiagnostic(raw_ostream &OS,
@@ -1392,6 +1382,7 @@
if (isa<ObjCMethodDecl>(this))
return false;
+ // FIXME: Is this correct if one of the decls comes from an inline namespace?
if (isa<ObjCInterfaceDecl>(this) && isa<ObjCCompatibleAliasDecl>(OldD))
return true;
@@ -1418,14 +1409,19 @@
// A typedef of an Objective-C class type can replace an Objective-C class
// declaration or definition, and vice versa.
+ // FIXME: Is this correct if one of the decls comes from an inline namespace?
if ((isa<TypedefNameDecl>(this) && isa<ObjCInterfaceDecl>(OldD)) ||
(isa<ObjCInterfaceDecl>(this) && isa<TypedefNameDecl>(OldD)))
return true;
-
+
// For non-function declarations, if the declarations are of the
- // same kind then this must be a redeclaration, or semantic analysis
- // would not have given us the new declaration.
- return this->getKind() == OldD->getKind();
+ // same kind and have the same parent, then this must be a redeclaration,
+ // or semantic analysis would not have given us the new declaration.
+ // Note that inline namespaces can give us two declarations with the same
+ // name and kind in the same scope but different contexts.
+ return this->getKind() == OldD->getKind() &&
+ this->getDeclContext()->getRedeclContext()->Equals(
+ OldD->getDeclContext()->getRedeclContext());
}
bool NamedDecl::hasLinkage() const {
@@ -1453,11 +1449,9 @@
if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<MSPropertyDecl>(D))
return true;
- if (isa<CXXMethodDecl>(D))
- return cast<CXXMethodDecl>(D)->isInstance();
- if (isa<FunctionTemplateDecl>(D))
- return cast<CXXMethodDecl>(cast<FunctionTemplateDecl>(D)
- ->getTemplatedDecl())->isInstance();
+ if (const CXXMethodDecl *MD =
+ dyn_cast_or_null<CXXMethodDecl>(D->getAsFunction()))
+ return MD->isInstance();
return false;
}
@@ -1574,7 +1568,9 @@
SourceRange DeclaratorDecl::getSourceRange() const {
SourceLocation RangeEnd = getLocation();
if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
- if (typeIsPostfix(TInfo->getType()))
+ // If the declaration has no name or the type extends past the name, take
+ // the end location of the type.
+ if (!getDeclName() || typeIsPostfix(TInfo->getType()))
RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
}
return SourceRange(getOuterLocStart(), RangeEnd);
@@ -1624,8 +1620,10 @@
SourceLocation IdLoc, IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, StorageClass SC)
: DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc), Init() {
- assert(sizeof(VarDeclBitfields) <= sizeof(unsigned));
- assert(sizeof(ParmVarDeclBitfields) <= sizeof(unsigned));
+ static_assert(sizeof(VarDeclBitfields) <= sizeof(unsigned),
+ "VarDeclBitfields too large!");
+ static_assert(sizeof(ParmVarDeclBitfields) <= sizeof(unsigned),
+ "ParmVarDeclBitfields too large!");
AllBits = 0;
VarDeclBits.SClass = SC;
// Everything else is implicitly initialized to false.
@@ -1635,13 +1633,12 @@
SourceLocation StartL, SourceLocation IdL,
IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
StorageClass S) {
- return new (C) VarDecl(Var, DC, StartL, IdL, Id, T, TInfo, S);
+ return new (C, DC) VarDecl(Var, DC, StartL, IdL, Id, T, TInfo, S);
}
VarDecl *VarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(VarDecl));
- return new (Mem) VarDecl(Var, 0, SourceLocation(), SourceLocation(), 0,
- QualType(), 0, SC_None);
+ return new (C, ID) VarDecl(Var, 0, SourceLocation(), SourceLocation(), 0,
+ QualType(), 0, SC_None);
}
void VarDecl::setStorageClass(StorageClass SC) {
@@ -1775,7 +1772,7 @@
// A declaration directly contained in a linkage-specification is treated
// as if it contains the extern specifier for the purpose of determining
// the linkage of the declared name and whether it is a definition.
- if (isSingleLineExternC(*this))
+ if (isSingleLineLanguageLinkage(*this))
return DeclarationOnly;
// C99 6.9.2p2:
@@ -1798,23 +1795,21 @@
VarDecl *LastTentative = 0;
VarDecl *First = getFirstDecl();
- for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
- I != E; ++I) {
- Kind = (*I)->isThisDeclarationADefinition();
+ for (auto I : First->redecls()) {
+ Kind = I->isThisDeclarationADefinition();
if (Kind == Definition)
return 0;
else if (Kind == TentativeDefinition)
- LastTentative = *I;
+ LastTentative = I;
}
return LastTentative;
}
VarDecl *VarDecl::getDefinition(ASTContext &C) {
VarDecl *First = getFirstDecl();
- for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
- I != E; ++I) {
- if ((*I)->isThisDeclarationADefinition(C) == Definition)
- return *I;
+ for (auto I : First->redecls()) {
+ if (I->isThisDeclarationADefinition(C) == Definition)
+ return I;
}
return 0;
}
@@ -1823,9 +1818,8 @@
DefinitionKind Kind = DeclarationOnly;
const VarDecl *First = getFirstDecl();
- for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
- I != E; ++I) {
- Kind = std::max(Kind, (*I)->isThisDeclarationADefinition(C));
+ for (auto I : First->redecls()) {
+ Kind = std::max(Kind, I->isThisDeclarationADefinition(C));
if (Kind == Definition)
break;
}
@@ -1834,13 +1828,11 @@
}
const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
- redecl_iterator I = redecls_begin(), E = redecls_end();
- while (I != E && !I->getInit())
- ++I;
-
- if (I != E) {
- D = *I;
- return I->getInit();
+ for (auto I : redecls()) {
+ if (auto Expr = I->getInit()) {
+ D = I;
+ return Expr;
+ }
}
return 0;
}
@@ -1865,10 +1857,9 @@
if (!isStaticDataMember())
return 0;
- for (VarDecl::redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
- RD != RDEnd; ++RD) {
+ for (auto RD : redecls()) {
if (RD->getLexicalDeclContext()->isFileContext())
- return *RD;
+ return RD;
}
return 0;
@@ -2108,8 +2099,8 @@
SourceLocation IdLoc, IdentifierInfo *Id,
QualType T, TypeSourceInfo *TInfo,
StorageClass S, Expr *DefArg) {
- return new (C) ParmVarDecl(ParmVar, DC, StartLoc, IdLoc, Id, T, TInfo,
- S, DefArg);
+ return new (C, DC) ParmVarDecl(ParmVar, DC, StartLoc, IdLoc, Id, T, TInfo,
+ S, DefArg);
}
QualType ParmVarDecl::getOriginalType() const {
@@ -2121,9 +2112,8 @@
}
ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ParmVarDecl));
- return new (Mem) ParmVarDecl(ParmVar, 0, SourceLocation(), SourceLocation(),
- 0, QualType(), 0, SC_None, 0);
+ return new (C, ID) ParmVarDecl(ParmVar, 0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0, SC_None, 0);
}
SourceRange ParmVarDecl::getSourceRange() const {
@@ -2196,9 +2186,9 @@
}
bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
- for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ for (auto I : redecls()) {
if (I->Body || I->IsLateTemplateParsed) {
- Definition = *I;
+ Definition = I;
return true;
}
}
@@ -2221,10 +2211,10 @@
}
bool FunctionDecl::isDefined(const FunctionDecl *&Definition) const {
- for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ for (auto I : redecls()) {
if (I->IsDeleted || I->IsDefaulted || I->Body || I->IsLateTemplateParsed ||
I->hasAttr<AliasAttr>()) {
- Definition = I->IsDeleted ? I->getCanonicalDecl() : *I;
+ Definition = I->IsDeleted ? I->getCanonicalDecl() : I;
return true;
}
}
@@ -2303,11 +2293,12 @@
getDeclName().getCXXOverloadedOperator() == OO_Array_New ||
getDeclName().getCXXOverloadedOperator() == OO_Array_Delete);
- if (isa<CXXRecordDecl>(getDeclContext())) return false;
- assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+ if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
+ return false;
const FunctionProtoType *proto = getType()->castAs<FunctionProtoType>();
- if (proto->getNumArgs() != 2 || proto->isVariadic()) return false;
+ if (proto->getNumParams() != 2 || proto->isVariadic())
+ return false;
ASTContext &Context =
cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext())
@@ -2315,7 +2306,7 @@
// The result type and first argument type are constant across all
// these operators. The second argument must be exactly void*.
- return (proto->getArgType(1).getCanonicalType() == Context.VoidPtrTy);
+ return (proto->getParamType(1).getCanonicalType() == Context.VoidPtrTy);
}
static bool isNamespaceStd(const DeclContext *DC) {
@@ -2335,20 +2326,23 @@
if (isa<CXXRecordDecl>(getDeclContext()))
return false;
- assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+
+ // This can only fail for an invalid 'operator new' declaration.
+ if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
+ return false;
const FunctionProtoType *FPT = getType()->castAs<FunctionProtoType>();
- if (FPT->getNumArgs() > 2 || FPT->isVariadic())
+ if (FPT->getNumParams() > 2 || FPT->isVariadic())
return false;
// If this is a single-parameter function, it must be a replaceable global
// allocation or deallocation function.
- if (FPT->getNumArgs() == 1)
+ if (FPT->getNumParams() == 1)
return true;
// Otherwise, we're looking for a second parameter whose type is
// 'const std::nothrow_t &', or, in C++1y, 'std::size_t'.
- QualType Ty = FPT->getArgType(1);
+ QualType Ty = FPT->getParamType(1);
ASTContext &Ctx = getASTContext();
if (Ctx.getLangOpts().SizedDeallocation &&
Ctx.hasSameType(Ty, Ctx.getSizeType()))
@@ -2376,10 +2370,12 @@
return 0;
if (isa<CXXRecordDecl>(getDeclContext()))
return 0;
- assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+
+ if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
+ return 0;
if (getNumParams() != 2 || isVariadic() ||
- !Ctx.hasSameType(getType()->castAs<FunctionProtoType>()->getArgType(1),
+ !Ctx.hasSameType(getType()->castAs<FunctionProtoType>()->getParamType(1),
Ctx.getSizeType()))
return 0;
@@ -2488,7 +2484,7 @@
// If the function is marked "overloadable", it has a different mangled name
// and is not the C library function.
- if (getAttr<OverloadableAttr>())
+ if (hasAttr<OverloadableAttr>())
return 0;
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
@@ -2510,11 +2506,8 @@
/// based on its FunctionType. This is the length of the ParamInfo array
/// after it has been created.
unsigned FunctionDecl::getNumParams() const {
- const FunctionType *FT = getType()->castAs<FunctionType>();
- if (isa<FunctionNoProtoType>(FT))
- return 0;
- return cast<FunctionProtoType>(FT)->getNumArgs();
-
+ const FunctionProtoType *FPT = getType()->getAs<FunctionProtoType>();
+ return FPT ? FPT->getNumParams() : 0;
}
void FunctionDecl::setParams(ASTContext &C,
@@ -2542,38 +2535,49 @@
/// getMinRequiredArguments - Returns the minimum number of arguments
/// needed to call this function. This may be fewer than the number of
/// function parameters, if some of the parameters have default
-/// arguments (in C++) or the last parameter is a parameter pack.
+/// arguments (in C++) or are parameter packs (C++11).
unsigned FunctionDecl::getMinRequiredArguments() const {
if (!getASTContext().getLangOpts().CPlusPlus)
return getNumParams();
-
- unsigned NumRequiredArgs = getNumParams();
-
- // If the last parameter is a parameter pack, we don't need an argument for
- // it.
- if (NumRequiredArgs > 0 &&
- getParamDecl(NumRequiredArgs - 1)->isParameterPack())
- --NumRequiredArgs;
-
- // If this parameter has a default argument, we don't need an argument for
- // it.
- while (NumRequiredArgs > 0 &&
- getParamDecl(NumRequiredArgs-1)->hasDefaultArg())
- --NumRequiredArgs;
- // We might have parameter packs before the end. These can't be deduced,
- // but they can still handle multiple arguments.
- unsigned ArgIdx = NumRequiredArgs;
- while (ArgIdx > 0) {
- if (getParamDecl(ArgIdx - 1)->isParameterPack())
- NumRequiredArgs = ArgIdx;
-
- --ArgIdx;
- }
-
+ unsigned NumRequiredArgs = 0;
+ for (auto *Param : params())
+ if (!Param->isParameterPack() && !Param->hasDefaultArg())
+ ++NumRequiredArgs;
return NumRequiredArgs;
}
+/// \brief The combination of the extern and inline keywords under MSVC forces
+/// the function to be required, i.e. its definition must be emitted.
+///
+/// Note: This function assumes that we will only get called when isInlined()
+/// would return true for this FunctionDecl.
+bool FunctionDecl::isMSExternInline() const {
+ assert(isInlined() && "expected to get called on an inlined function!");
+
+ const ASTContext &Context = getASTContext();
+ if (!Context.getLangOpts().MSVCCompat)
+ return false;
+
+ for (const FunctionDecl *FD = this; FD; FD = FD->getPreviousDecl())
+ if (FD->getStorageClass() == SC_Extern)
+ return true;
+
+ return false;
+}
+
+static bool redeclForcesDefMSVC(const FunctionDecl *Redecl) {
+ if (Redecl->getStorageClass() != SC_Extern)
+ return false;
+
+ for (const FunctionDecl *FD = Redecl->getPreviousDecl(); FD;
+ FD = FD->getPreviousDecl())
+ if (FD->getStorageClass() == SC_Extern)
+ return false;
+
+ return true;
+}
+
static bool RedeclForcesDefC99(const FunctionDecl *Redecl) {
// Only consider file-scope declarations in this test.
if (!Redecl->getLexicalDeclContext()->isTranslationUnit())
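As a quick illustration of what the rewritten getMinRequiredArguments above counts (the signature below is hypothetical): every parameter that is neither a parameter pack nor defaulted requires an argument, regardless of where it sits in the parameter list.

// getMinRequiredArguments() == 2 for this declaration:
//   'a' and 'b' are counted (no default argument, not packs),
//   'c' is skipped (it has a default argument),
//   'rest' is skipped (a function parameter pack may be empty).
template <typename... Ts>
void f(int a, int b, int c = 42, Ts... rest);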
@@ -2593,7 +2597,7 @@
/// \brief For a function declaration in C or C++, determine whether this
/// declaration causes the definition to be externally visible.
///
-/// Specifically, this determines if adding the current declaration to the set
+/// For instance, this determines if adding the current declaration to the set
/// of redeclarations of the given functions causes
/// isInlineDefinitionExternallyVisible to change from false to true.
bool FunctionDecl::doesDeclarationForceExternallyVisibleDefinition() const {
@@ -2602,6 +2606,13 @@
ASTContext &Context = getASTContext();
+ if (Context.getLangOpts().MSVCCompat) {
+ const FunctionDecl *Definition;
+ if (hasBody(Definition) && Definition->isInlined() &&
+ redeclForcesDefMSVC(this))
+ return true;
+ }
+
if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
// With GNU inlining, a declaration with 'inline' but not 'extern', forces
// an externally visible definition.
@@ -2683,9 +2694,7 @@
// If any declaration is 'inline' but not 'extern', then this definition
// is externally visible.
- for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
- Redecl != RedeclEnd;
- ++Redecl) {
+ for (auto Redecl : redecls()) {
if (Redecl->isInlineSpecified() &&
Redecl->getStorageClass() != SC_Extern)
return true;
@@ -2702,10 +2711,8 @@
// [...] If all of the file scope declarations for a function in a
// translation unit include the inline function specifier without extern,
// then the definition in that translation unit is an inline definition.
- for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
- Redecl != RedeclEnd;
- ++Redecl) {
- if (RedeclForcesDefC99(*Redecl))
+ for (auto Redecl : redecls()) {
+ if (RedeclForcesDefC99(Redecl))
return true;
}
@@ -2824,14 +2831,34 @@
// Handle class scope explicit specialization special case.
if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
return getClassScopeSpecializationPattern();
+
+ // If this is a generic lambda call operator specialization, its
+ // instantiation pattern is always its primary template's pattern
+ // even if its primary template was instantiated from another
+ // member template (which happens with nested generic lambdas).
+ // Since a lambda's call operator's body is transformed eagerly,
+ // we don't have to go hunting for a prototype definition template
+ // (i.e. instantiated-from-member-template) to use as an instantiation
+ // pattern.
+ if (isGenericLambdaCallOperatorSpecialization(
+ dyn_cast<CXXMethodDecl>(this))) {
+ assert(getPrimaryTemplate() && "A generic lambda specialization must be "
+ "generated from a primary call operator "
+ "template");
+ assert(getPrimaryTemplate()->getTemplatedDecl()->getBody() &&
+ "A generic lambda call operator template must always have a body - "
+ "even if instantiated from a prototype (i.e. as written) member "
+ "template");
+ return getPrimaryTemplate()->getTemplatedDecl();
+ }
+
if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
while (Primary->getInstantiatedFromMemberTemplate()) {
// If we have hit a point where the user provided a specialization of
// this template, we're done looking.
if (Primary->isMemberSpecialization())
break;
-
Primary = Primary->getInstantiatedFromMemberTemplate();
}
@@ -3101,14 +3128,13 @@
IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
InClassInitStyle InitStyle) {
- return new (C) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo,
- BW, Mutable, InitStyle);
+ return new (C, DC) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo,
+ BW, Mutable, InitStyle);
}
FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FieldDecl));
- return new (Mem) FieldDecl(Field, 0, SourceLocation(), SourceLocation(),
- 0, QualType(), 0, 0, false, ICIS_NoInit);
+ return new (C, ID) FieldDecl(Field, 0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0, 0, false, ICIS_NoInit);
}
bool FieldDecl::isAnonymousStructOrUnion() const {
@@ -3191,8 +3217,8 @@
if (CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(this)) {
struct CXXRecordDecl::DefinitionData *Data =
new (getASTContext()) struct CXXRecordDecl::DefinitionData(D);
- for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
- cast<CXXRecordDecl>(*I)->DefinitionData = Data;
+ for (auto I : redecls())
+ cast<CXXRecordDecl>(I)->DefinitionData = Data;
}
}
@@ -3224,10 +3250,9 @@
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this))
return CXXRD->getDefinition();
- for (redecl_iterator R = redecls_begin(), REnd = redecls_end();
- R != REnd; ++R)
+ for (auto R : redecls())
if (R->isCompleteDefinition())
- return *R;
+ return R;
return 0;
}
@@ -3275,21 +3300,27 @@
IdentifierInfo *Id,
EnumDecl *PrevDecl, bool IsScoped,
bool IsScopedUsingClassTag, bool IsFixed) {
- EnumDecl *Enum = new (C) EnumDecl(DC, StartLoc, IdLoc, Id, PrevDecl,
- IsScoped, IsScopedUsingClassTag, IsFixed);
+ EnumDecl *Enum = new (C, DC) EnumDecl(DC, StartLoc, IdLoc, Id, PrevDecl,
+ IsScoped, IsScopedUsingClassTag,
+ IsFixed);
Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
C.getTypeDeclType(Enum, PrevDecl);
return Enum;
}
EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumDecl));
- EnumDecl *Enum = new (Mem) EnumDecl(0, SourceLocation(), SourceLocation(),
- 0, 0, false, false, false);
+ EnumDecl *Enum = new (C, ID) EnumDecl(0, SourceLocation(), SourceLocation(),
+ 0, 0, false, false, false);
Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
return Enum;
}
+SourceRange EnumDecl::getIntegerTypeRange() const {
+ if (const TypeSourceInfo *TI = getIntegerTypeSourceInfo())
+ return TI->getTypeLoc().getSourceRange();
+ return SourceRange();
+}
+
void EnumDecl::completeDefinition(QualType NewType,
QualType NewPromotionType,
unsigned NumPositiveBits,
@@ -3353,8 +3384,8 @@
RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, RecordDecl* PrevDecl) {
- RecordDecl* R = new (C) RecordDecl(Record, TK, DC, StartLoc, IdLoc, Id,
- PrevDecl);
+ RecordDecl* R = new (C, DC) RecordDecl(Record, TK, DC, StartLoc, IdLoc, Id,
+ PrevDecl);
R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
C.getTypeDeclType(R, PrevDecl);
@@ -3362,9 +3393,8 @@
}
RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(RecordDecl));
- RecordDecl *R = new (Mem) RecordDecl(Record, TTK_Struct, 0, SourceLocation(),
- SourceLocation(), 0, 0);
+ RecordDecl *R = new (C, ID) RecordDecl(Record, TTK_Struct, 0, SourceLocation(),
+ SourceLocation(), 0, 0);
R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
return R;
}
@@ -3427,7 +3457,7 @@
if (Decls.empty())
return;
- llvm::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
+ std::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
/*FieldsAlreadyLoaded=*/false);
}
@@ -3469,10 +3499,9 @@
}
bool BlockDecl::capturesVariable(const VarDecl *variable) const {
- for (capture_const_iterator
- i = capture_begin(), e = capture_end(); i != e; ++i)
+ for (const auto &I : captures())
// Only auto vars can be captured, so no redeclaration worries.
- if (i->getVariable() == variable)
+ if (I.getVariable() == variable)
return true;
return false;
@@ -3489,33 +3518,32 @@
void TranslationUnitDecl::anchor() { }
TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
- return new (C) TranslationUnitDecl(C);
+ return new (C, (DeclContext*)0) TranslationUnitDecl(C);
}
void LabelDecl::anchor() { }
LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation IdentL, IdentifierInfo *II) {
- return new (C) LabelDecl(DC, IdentL, II, 0, IdentL);
+ return new (C, DC) LabelDecl(DC, IdentL, II, 0, IdentL);
}
LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation IdentL, IdentifierInfo *II,
SourceLocation GnuLabelL) {
assert(GnuLabelL != IdentL && "Use this only for GNU local labels");
- return new (C) LabelDecl(DC, IdentL, II, 0, GnuLabelL);
+ return new (C, DC) LabelDecl(DC, IdentL, II, 0, GnuLabelL);
}
LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LabelDecl));
- return new (Mem) LabelDecl(0, SourceLocation(), 0, 0, SourceLocation());
+ return new (C, ID) LabelDecl(0, SourceLocation(), 0, 0, SourceLocation());
}
void ValueDecl::anchor() { }
bool ValueDecl::isWeak() const {
- for (attr_iterator I = attr_begin(), E = attr_end(); I != E; ++I)
- if (isa<WeakAttr>(*I) || isa<WeakRefAttr>(*I))
+ for (const auto *I : attrs())
+ if (isa<WeakAttr>(I) || isa<WeakRefAttr>(I))
return true;
return isWeakImported();
@@ -3527,13 +3555,12 @@
SourceLocation IdLoc,
IdentifierInfo *Id,
QualType Type) {
- return new (C) ImplicitParamDecl(DC, IdLoc, Id, Type);
+ return new (C, DC) ImplicitParamDecl(DC, IdLoc, Id, Type);
}
-ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
+ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ImplicitParamDecl));
- return new (Mem) ImplicitParamDecl(0, SourceLocation(), 0, QualType());
+ return new (C, ID) ImplicitParamDecl(0, SourceLocation(), 0, QualType());
}
FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
@@ -3541,66 +3568,53 @@
const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo *TInfo,
StorageClass SC,
- bool isInlineSpecified,
+ bool isInlineSpecified,
bool hasWrittenPrototype,
bool isConstexprSpecified) {
- FunctionDecl *New = new (C) FunctionDecl(Function, DC, StartLoc, NameInfo,
- T, TInfo, SC,
- isInlineSpecified,
- isConstexprSpecified);
+ FunctionDecl *New =
+ new (C, DC) FunctionDecl(Function, DC, StartLoc, NameInfo, T, TInfo, SC,
+ isInlineSpecified, isConstexprSpecified);
New->HasWrittenPrototype = hasWrittenPrototype;
return New;
}
FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionDecl));
- return new (Mem) FunctionDecl(Function, 0, SourceLocation(),
- DeclarationNameInfo(), QualType(), 0,
- SC_None, false, false);
+ return new (C, ID) FunctionDecl(Function, 0, SourceLocation(),
+ DeclarationNameInfo(), QualType(), 0,
+ SC_None, false, false);
}
BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
- return new (C) BlockDecl(DC, L);
+ return new (C, DC) BlockDecl(DC, L);
}
BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(BlockDecl));
- return new (Mem) BlockDecl(0, SourceLocation());
-}
-
-MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(MSPropertyDecl));
- return new (Mem) MSPropertyDecl(0, SourceLocation(), DeclarationName(),
- QualType(), 0, SourceLocation(),
- 0, 0);
+ return new (C, ID) BlockDecl(0, SourceLocation());
}
CapturedDecl *CapturedDecl::Create(ASTContext &C, DeclContext *DC,
unsigned NumParams) {
- unsigned Size = sizeof(CapturedDecl) + NumParams * sizeof(ImplicitParamDecl*);
- return new (C.Allocate(Size)) CapturedDecl(DC, NumParams);
+ return new (C, DC, NumParams * sizeof(ImplicitParamDecl *))
+ CapturedDecl(DC, NumParams);
}
CapturedDecl *CapturedDecl::CreateDeserialized(ASTContext &C, unsigned ID,
- unsigned NumParams) {
- unsigned Size = sizeof(CapturedDecl) + NumParams * sizeof(ImplicitParamDecl*);
- void *Mem = AllocateDeserializedDecl(C, ID, Size);
- return new (Mem) CapturedDecl(0, NumParams);
+ unsigned NumParams) {
+ return new (C, ID, NumParams * sizeof(ImplicitParamDecl *))
+ CapturedDecl(0, NumParams);
}
EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
SourceLocation L,
IdentifierInfo *Id, QualType T,
Expr *E, const llvm::APSInt &V) {
- return new (C) EnumConstantDecl(CD, L, Id, T, E, V);
+ return new (C, CD) EnumConstantDecl(CD, L, Id, T, E, V);
}
EnumConstantDecl *
EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumConstantDecl));
- return new (Mem) EnumConstantDecl(0, SourceLocation(), 0, QualType(), 0,
- llvm::APSInt());
+ return new (C, ID) EnumConstantDecl(0, SourceLocation(), 0, QualType(), 0,
+ llvm::APSInt());
}
void IndirectFieldDecl::anchor() { }
@@ -3609,14 +3623,13 @@
IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
IdentifierInfo *Id, QualType T, NamedDecl **CH,
unsigned CHS) {
- return new (C) IndirectFieldDecl(DC, L, Id, T, CH, CHS);
+ return new (C, DC) IndirectFieldDecl(DC, L, Id, T, CH, CHS);
}
IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(IndirectFieldDecl));
- return new (Mem) IndirectFieldDecl(0, SourceLocation(), DeclarationName(),
- QualType(), 0, 0);
+ return new (C, ID) IndirectFieldDecl(0, SourceLocation(), DeclarationName(),
+ QualType(), 0, 0);
}
SourceRange EnumConstantDecl::getSourceRange() const {
@@ -3631,26 +3644,24 @@
TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, TypeSourceInfo *TInfo) {
- return new (C) TypedefDecl(DC, StartLoc, IdLoc, Id, TInfo);
+ return new (C, DC) TypedefDecl(DC, StartLoc, IdLoc, Id, TInfo);
}
void TypedefNameDecl::anchor() { }
TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypedefDecl));
- return new (Mem) TypedefDecl(0, SourceLocation(), SourceLocation(), 0, 0);
+ return new (C, ID) TypedefDecl(0, SourceLocation(), SourceLocation(), 0, 0);
}
TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
TypeSourceInfo *TInfo) {
- return new (C) TypeAliasDecl(DC, StartLoc, IdLoc, Id, TInfo);
+ return new (C, DC) TypeAliasDecl(DC, StartLoc, IdLoc, Id, TInfo);
}
TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasDecl));
- return new (Mem) TypeAliasDecl(0, SourceLocation(), SourceLocation(), 0, 0);
+ return new (C, ID) TypeAliasDecl(0, SourceLocation(), SourceLocation(), 0, 0);
}
SourceRange TypedefDecl::getSourceRange() const {
@@ -3675,24 +3686,22 @@
StringLiteral *Str,
SourceLocation AsmLoc,
SourceLocation RParenLoc) {
- return new (C) FileScopeAsmDecl(DC, Str, AsmLoc, RParenLoc);
+ return new (C, DC) FileScopeAsmDecl(DC, Str, AsmLoc, RParenLoc);
}
-FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
+FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FileScopeAsmDecl));
- return new (Mem) FileScopeAsmDecl(0, 0, SourceLocation(), SourceLocation());
+ return new (C, ID) FileScopeAsmDecl(0, 0, SourceLocation(), SourceLocation());
}
void EmptyDecl::anchor() {}
EmptyDecl *EmptyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
- return new (C) EmptyDecl(DC, L);
+ return new (C, DC) EmptyDecl(DC, L);
}
EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EmptyDecl));
- return new (Mem) EmptyDecl(0, SourceLocation());
+ return new (C, ID) EmptyDecl(0, SourceLocation());
}
//===----------------------------------------------------------------------===//
@@ -3730,30 +3739,28 @@
*reinterpret_cast<SourceLocation *>(this + 1) = EndLoc;
}
-ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC,
+ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, Module *Imported,
ArrayRef<SourceLocation> IdentifierLocs) {
- void *Mem = C.Allocate(sizeof(ImportDecl) +
- IdentifierLocs.size() * sizeof(SourceLocation));
- return new (Mem) ImportDecl(DC, StartLoc, Imported, IdentifierLocs);
+ return new (C, DC, IdentifierLocs.size() * sizeof(SourceLocation))
+ ImportDecl(DC, StartLoc, Imported, IdentifierLocs);
}
-ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC,
+ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
- Module *Imported,
+ Module *Imported,
SourceLocation EndLoc) {
- void *Mem = C.Allocate(sizeof(ImportDecl) + sizeof(SourceLocation));
- ImportDecl *Import = new (Mem) ImportDecl(DC, StartLoc, Imported, EndLoc);
+ ImportDecl *Import =
+ new (C, DC, sizeof(SourceLocation)) ImportDecl(DC, StartLoc,
+ Imported, EndLoc);
Import->setImplicit();
return Import;
}
ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumLocations) {
- void *Mem = AllocateDeserializedDecl(C, ID,
- (sizeof(ImportDecl) +
- NumLocations * sizeof(SourceLocation)));
- return new (Mem) ImportDecl(EmptyShell());
+ return new (C, ID, NumLocations * sizeof(SourceLocation))
+ ImportDecl(EmptyShell());
}
ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 121c5a6..1de1fe2 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -45,25 +45,30 @@
getASTContext().getExternalSource()->updateOutOfDateIdentifier(II);
}
-void *Decl::AllocateDeserializedDecl(const ASTContext &Context,
- unsigned ID,
- unsigned Size) {
+void *Decl::operator new(std::size_t Size, const ASTContext &Context,
+ unsigned ID, std::size_t Extra) {
// Allocate an extra 8 bytes worth of storage, which ensures that the
// resulting pointer will still be 8-byte aligned.
- void *Start = Context.Allocate(Size + 8);
+ void *Start = Context.Allocate(Size + Extra + 8);
void *Result = (char*)Start + 8;
-
+
unsigned *PrefixPtr = (unsigned *)Result - 2;
-
+
// Zero out the first 4 bytes; this is used to store the owning module ID.
PrefixPtr[0] = 0;
-
+
// Store the global declaration ID in the second 4 bytes.
PrefixPtr[1] = ID;
-
+
return Result;
}
+void *Decl::operator new(std::size_t Size, const ASTContext &Ctx,
+ DeclContext *Parent, std::size_t Extra) {
+ assert(!Parent || &Parent->getParentASTContext() == &Ctx);
+ return ::operator new(Size + Extra, Ctx);
+}
+
Module *Decl::getOwningModuleSlow() const {
assert(isFromASTFile() && "Not from AST file?");
return getASTContext().getExternalSource()->getModule(getOwningModuleID());
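A rough standalone sketch of the allocation layout used by the ID-taking Decl::operator new above (malloc stands in for the ASTContext allocator; the helper names are invented): over-allocate by 8 bytes, hand back a pointer 8 bytes in so the usual alignment is preserved, and keep two 32-bit values -- the owning module ID and the global declaration ID -- in the prefix.

#include <cstdint>
#include <cstdlib>
#include <cstring>

// Allocate Size bytes preceded by an 8-byte prefix holding two 32-bit IDs.
// The matching deallocation would be std::free((char *)Object - 8).
void *allocateWithIDPrefix(std::size_t Size, std::uint32_t GlobalID) {
  char *Start = static_cast<char *>(std::malloc(Size + 8));
  char *Result = Start + 8;                // an offset of 8 keeps 8-byte alignment
  std::uint32_t Prefix[2] = {0, GlobalID}; // [0]: owning module ID, unset here
  std::memcpy(Result - 8, Prefix, sizeof(Prefix));
  return Result;
}

// Recover the global ID stored in front of an object allocated above.
std::uint32_t getGlobalIDPrefix(const void *Object) {
  std::uint32_t Prefix[2];
  std::memcpy(Prefix, static_cast<const char *>(Object) - 8, sizeof(Prefix));
  return Prefix[1];
}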
@@ -80,6 +85,7 @@
void Decl::setInvalidDecl(bool Invalid) {
InvalidDecl = Invalid;
+ assert(!isa<TagDecl>(this) || !cast<TagDecl>(this)->isCompleteDefinition());
if (Invalid && !isa<ParmVarDecl>(this)) {
// Defensive maneuver for ill-formed code: we're likely not to make it to
// a point where we set the access specifier, so default it to "public"
@@ -153,11 +159,12 @@
return isTemplateParameterPack();
}
-bool Decl::isFunctionOrFunctionTemplate() const {
- if (const UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(this))
- return UD->getTargetDecl()->isFunctionOrFunctionTemplate();
-
- return isa<FunctionDecl>(this) || isa<FunctionTemplateDecl>(this);
+FunctionDecl *Decl::getAsFunction() {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
+ return FD;
+ if (const FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(this))
+ return FTD->getTemplatedDecl();
+ return 0;
}
bool Decl::isTemplateDecl() const {
@@ -306,7 +313,7 @@
return true;
// Check redeclarations.
- for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
+ for (auto I : redecls())
if (I->Referenced)
return true;
@@ -401,8 +408,8 @@
AvailabilityResult Result = AR_Available;
std::string ResultMessage;
- for (attr_iterator A = attr_begin(), AEnd = attr_end(); A != AEnd; ++A) {
- if (DeprecatedAttr *Deprecated = dyn_cast<DeprecatedAttr>(*A)) {
+ for (const auto *A : attrs()) {
+ if (const auto *Deprecated = dyn_cast<DeprecatedAttr>(A)) {
if (Result >= AR_Deprecated)
continue;
@@ -413,13 +420,13 @@
continue;
}
- if (UnavailableAttr *Unavailable = dyn_cast<UnavailableAttr>(*A)) {
+ if (const auto *Unavailable = dyn_cast<UnavailableAttr>(A)) {
if (Message)
*Message = Unavailable->getMessage();
return AR_Unavailable;
}
- if (AvailabilityAttr *Availability = dyn_cast<AvailabilityAttr>(*A)) {
+ if (const auto *Availability = dyn_cast<AvailabilityAttr>(A)) {
AvailabilityResult AR = CheckAvailability(getASTContext(), Availability,
Message);
@@ -475,11 +482,11 @@
if (!canBeWeakImported(IsDefinition))
return false;
- for (attr_iterator A = attr_begin(), AEnd = attr_end(); A != AEnd; ++A) {
- if (isa<WeakImportAttr>(*A))
+ for (const auto *A : attrs()) {
+ if (isa<WeakImportAttr>(A))
return true;
- if (AvailabilityAttr *Availability = dyn_cast<AvailabilityAttr>(*A)) {
+ if (const auto *Availability = dyn_cast<AvailabilityAttr>(A)) {
if (CheckAvailability(getASTContext(), Availability, 0)
== AR_NotYetIntroduced)
return true;
@@ -662,7 +669,7 @@
return SourceLocation();
}
-void Decl::CheckAccessDeclContext() const {
+bool Decl::AccessDeclContextSanity() const {
#ifndef NDEBUG
// Suppress this check if any of the following hold:
// 1. this is the translation unit (and thus has no parent)
@@ -684,16 +691,35 @@
// AS_none as access specifier.
isa<CXXRecordDecl>(this) ||
isa<ClassScopeFunctionSpecializationDecl>(this))
- return;
+ return true;
assert(Access != AS_none &&
"Access specifier is AS_none inside a record decl");
#endif
+ return true;
}
static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }
+const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
+ QualType Ty;
+ if (const ValueDecl *D = dyn_cast<ValueDecl>(this))
+ Ty = D->getType();
+ else if (const TypedefNameDecl *D = dyn_cast<TypedefNameDecl>(this))
+ Ty = D->getUnderlyingType();
+ else
+ return 0;
+
+ if (Ty->isFunctionPointerType())
+ Ty = Ty->getAs<PointerType>()->getPointeeType();
+ else if (BlocksToo && Ty->isBlockPointerType())
+ Ty = Ty->getAs<BlockPointerType>()->getPointeeType();
+
+ return Ty->getAs<FunctionType>();
+}
+
+
/// Starting at a given context (a Decl or DeclContext), look for a
/// code context that is not a closure (a lambda, block, etc.).
template <class T> static Decl *getNonClosureContext(T *D) {
@@ -939,13 +965,12 @@
/// \brief We have just acquired external visible storage, and we already have
/// built a lookup map. For every name in the map, pull in the new names from
/// the external storage.
-void DeclContext::reconcileExternalVisibleStorage() {
+void DeclContext::reconcileExternalVisibleStorage() const {
assert(NeedToReconcileExternalVisibleStorage && LookupPtr.getPointer());
NeedToReconcileExternalVisibleStorage = false;
- StoredDeclsMap &Map = *LookupPtr.getPointer();
- for (StoredDeclsMap::iterator I = Map.begin(); I != Map.end(); ++I)
- I->second.setHasExternalDecls();
+ for (auto &Lookup : *LookupPtr.getPointer())
+ Lookup.second.setHasExternalDecls();
}
/// \brief Load the declarations within this lexical storage from an
@@ -982,8 +1007,8 @@
// Splice the newly-read declarations into the beginning of the list
// of declarations.
Decl *ExternalFirst, *ExternalLast;
- llvm::tie(ExternalFirst, ExternalLast) = BuildDeclChain(Decls,
- FieldsAlreadyLoaded);
+ std::tie(ExternalFirst, ExternalLast) =
+ BuildDeclChain(Decls, FieldsAlreadyLoaded);
ExternalLast->NextInContextAndBits.setPointer(FirstDecl);
FirstDecl = ExternalFirst;
if (!LastDecl)
@@ -997,6 +1022,8 @@
StoredDeclsMap *Map;
if (!(Map = DC->LookupPtr.getPointer()))
Map = DC->CreateStoredDeclsMap(Context);
+ if (DC->NeedToReconcileExternalVisibleStorage)
+ DC->reconcileExternalVisibleStorage();
(*Map)[Name].removeExternalDecls();
@@ -1011,6 +1038,8 @@
StoredDeclsMap *Map;
if (!(Map = DC->LookupPtr.getPointer()))
Map = DC->CreateStoredDeclsMap(Context);
+ if (DC->NeedToReconcileExternalVisibleStorage)
+ DC->reconcileExternalVisibleStorage();
StoredDeclsList &List = (*Map)[Name];
@@ -1050,14 +1079,9 @@
return List.getLookupResult();
}
-DeclContext::decl_iterator DeclContext::noload_decls_begin() const {
- return decl_iterator(FirstDecl);
-}
-
DeclContext::decl_iterator DeclContext::decls_begin() const {
if (hasExternalLexicalStorage())
LoadLexicalDeclsFromExternalStorage();
-
return decl_iterator(FirstDecl);
}
@@ -1187,6 +1211,10 @@
/// buildLookup - Build the lookup data structure with all of the
/// declarations in this DeclContext (and any other contexts linked
/// to it or transparent contexts nested within it) and return it.
+///
+/// Note that the produced map may omit declarations from an
+/// external source. If it does, those entries will be marked with
+/// the 'hasExternalDecls' flag.
StoredDeclsMap *DeclContext::buildLookup() {
assert(this == getPrimaryContext() && "buildLookup called on non-primary DC");
@@ -1202,7 +1230,6 @@
// We no longer have any lazy decls.
LookupPtr.setInt(false);
- NeedToReconcileExternalVisibleStorage = false;
return LookupPtr.getPointer();
}
@@ -1251,11 +1278,13 @@
return PrimaryContext->lookup(Name);
if (hasExternalVisibleStorage()) {
+ if (NeedToReconcileExternalVisibleStorage)
+ reconcileExternalVisibleStorage();
+
StoredDeclsMap *Map = LookupPtr.getPointer();
+
if (LookupPtr.getInt())
Map = buildLookup();
- else if (NeedToReconcileExternalVisibleStorage)
- reconcileExternalVisibleStorage();
if (!Map)
Map = CreateStoredDeclsMap(getParentASTContext());
@@ -1267,7 +1296,7 @@
return R.first->second.getLookupResult();
ExternalASTSource *Source = getParentASTContext().getExternalSource();
- if (Source->FindExternalVisibleDeclsByName(this, Name) || R.second) {
+ if (Source->FindExternalVisibleDeclsByName(this, Name) || !R.second) {
if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
StoredDeclsMap::iterator I = Map->find(Name);
if (I != Map->end())
@@ -1502,13 +1531,13 @@
/// Returns iterator range [First, Last) of UsingDirectiveDecls stored within
/// this context.
-DeclContext::udir_iterator_range
-DeclContext::getUsingDirectives() const {
+DeclContext::udir_range DeclContext::using_directives() const {
// FIXME: Use something more efficient than normal lookup for using
// directives. In C++, using directives are looked up more than anything else.
lookup_const_result Result = lookup(UsingDirectiveDecl::getName());
- return udir_iterator_range(reinterpret_cast<udir_iterator>(Result.begin()),
- reinterpret_cast<udir_iterator>(Result.end()));
+ return udir_range(
+ reinterpret_cast<UsingDirectiveDecl *const *>(Result.begin()),
+ reinterpret_cast<UsingDirectiveDecl *const *>(Result.end()));
}
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index a17abdd..d0ec4b9 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -31,8 +31,7 @@
void AccessSpecDecl::anchor() { }
AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(AccessSpecDecl));
- return new (Mem) AccessSpecDecl(EmptyShell());
+ return new (C, ID) AccessSpecDecl(EmptyShell());
}
void LazyASTUnresolvedSet::getFromExternalSource(ASTContext &C) const {
@@ -51,7 +50,7 @@
Aggregate(true), PlainOldData(true), Empty(true), Polymorphic(false),
Abstract(false), IsStandardLayout(true), HasNoNonEmptyBases(true),
HasPrivateFields(false), HasProtectedFields(false), HasPublicFields(false),
- HasMutableFields(false), HasOnlyCMembers(true),
+ HasMutableFields(false), HasVariantMembers(false), HasOnlyCMembers(true),
HasInClassInitializer(false), HasUninitializedReferenceMember(false),
NeedOverloadResolutionForMoveConstructor(false),
NeedOverloadResolutionForMoveAssignment(false),
@@ -95,8 +94,8 @@
SourceLocation IdLoc, IdentifierInfo *Id,
CXXRecordDecl* PrevDecl,
bool DelayTypeCreation) {
- CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TK, DC, StartLoc, IdLoc,
- Id, PrevDecl);
+ CXXRecordDecl *R = new (C, DC) CXXRecordDecl(CXXRecord, TK, DC, StartLoc,
+ IdLoc, Id, PrevDecl);
R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
// FIXME: DelayTypeCreation seems like such a hack
@@ -109,8 +108,8 @@
TypeSourceInfo *Info, SourceLocation Loc,
bool Dependent, bool IsGeneric,
LambdaCaptureDefault CaptureDefault) {
- CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TTK_Class, DC, Loc, Loc,
- 0, 0);
+ CXXRecordDecl *R =
+ new (C, DC) CXXRecordDecl(CXXRecord, TTK_Class, DC, Loc, Loc, 0, 0);
R->IsBeingDefined = true;
R->DefinitionData = new (C) struct LambdaDefinitionData(R, Info,
Dependent,
@@ -124,10 +123,8 @@
CXXRecordDecl *
CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXRecordDecl));
- CXXRecordDecl *R = new (Mem) CXXRecordDecl(CXXRecord, TTK_Struct, 0,
- SourceLocation(), SourceLocation(),
- 0, 0);
+ CXXRecordDecl *R = new (C, ID) CXXRecordDecl(
+ CXXRecord, TTK_Struct, 0, SourceLocation(), SourceLocation(), 0, 0);
R->MayHaveOutOfDateDef = false;
return R;
}
@@ -205,19 +202,17 @@
data().HasNonLiteralTypeFieldsOrBases = true;
// Now go through all virtual bases of this base and add them.
- for (CXXRecordDecl::base_class_iterator VBase =
- BaseClassDecl->vbases_begin(),
- E = BaseClassDecl->vbases_end(); VBase != E; ++VBase) {
+ for (const auto &VBase : BaseClassDecl->vbases()) {
// Add this base if it's not already in the list.
- if (SeenVBaseTypes.insert(C.getCanonicalType(VBase->getType()))) {
- VBases.push_back(VBase);
+ if (SeenVBaseTypes.insert(C.getCanonicalType(VBase.getType()))) {
+ VBases.push_back(&VBase);
// C++11 [class.copy]p8:
// The implicitly-declared copy constructor for a class X will have
// the form 'X::X(const X&)' if each [...] virtual base class B of X
// has a copy constructor whose first parameter is of type
// 'const B&' or 'const volatile B&' [...]
- if (CXXRecordDecl *VBaseDecl = VBase->getType()->getAsCXXRecordDecl())
+ if (CXXRecordDecl *VBaseDecl = VBase.getType()->getAsCXXRecordDecl())
if (!VBaseDecl->hasCopyConstructorWithConstParam())
data().ImplicitCopyConstructorHasConstParam = false;
}
@@ -532,8 +527,11 @@
if (CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) {
SMKind |= SMF_Destructor;
- if (!DD->isImplicit())
+ if (DD->isUserProvided())
data().HasIrrelevantDestructor = false;
+ // If the destructor is explicitly defaulted and not trivial or not public
+ // or if the destructor is deleted, we clear HasIrrelevantDestructor in
+ // finishedDefaultedOrDeletedMember.
// C++11 [class.dtor]p5:
// A destructor is trivial if [...] the destructor is not virtual.
@@ -656,7 +654,13 @@
// Keep track of the presence of mutable fields.
if (Field->isMutable())
data().HasMutableFields = true;
-
+
+ // C++11 [class.union]p8, DR1460:
+ // If X is a union, a non-static data member of X that is not an anonymous
+ // union is a variant member of X.
+ if (isUnion() && !Field->isAnonymousStructOrUnion())
+ data().HasVariantMembers = true;
+
// C++0x [class]p9:
// A POD struct is a class that is both a trivial class and a
// standard-layout class, and has no non-static data members of type
@@ -692,7 +696,9 @@
if (!T->isLiteralType(Context) || T.isVolatileQualified())
data().HasNonLiteralTypeFieldsOrBases = true;
- if (Field->hasInClassInitializer()) {
+ if (Field->hasInClassInitializer() ||
+ (Field->isAnonymousStructOrUnion() &&
+ Field->getType()->getAsCXXRecordDecl()->hasInClassInitializer())) {
data().HasInClassInitializer = true;
// C++11 [class]p5:
@@ -809,15 +815,13 @@
// Virtual bases and virtual methods make a class non-empty, but they
// also make it non-standard-layout so we needn't check here.
// A non-empty base class may leave the class standard-layout, but not
- // if we have arrived here, and have at least on non-static data
+ // if we have arrived here, and have at least one non-static data
// member. If IsStandardLayout remains true, then the first non-static
// data member must come through here with Empty still true, and Empty
// will subsequently be set to false below.
if (data().IsStandardLayout && data().Empty) {
- for (CXXRecordDecl::base_class_const_iterator BI = bases_begin(),
- BE = bases_end();
- BI != BE; ++BI) {
- if (Context.hasSameUnqualifiedType(BI->getType(), T)) {
+ for (const auto &BI : bases()) {
+ if (Context.hasSameUnqualifiedType(BI.getType(), T)) {
data().IsStandardLayout = false;
break;
}
@@ -862,6 +866,13 @@
if (FieldRec->hasUninitializedReferenceMember() &&
!Field->hasInClassInitializer())
data().HasUninitializedReferenceMember = true;
+
+ // C++11 [class.union]p8, DR1460:
+ // a non-static data member of an anonymous union that is a member of
+ // X is also a variant member of X.
+ if (FieldRec->hasVariantMembers() &&
+ Field->isAnonymousStructOrUnion())
+ data().HasVariantMembers = true;
}
} else {
// Base element type of field is a non-class type.
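The two DR1460 hunks above set HasVariantMembers in two situations; a minimal pair of examples (type names are illustrative):

// Direct case: 'a' and 'b' are variant members of the union U.
union U {
  int a;
  float b;
};

// Indirect case: 'x' and 'y' are members of an anonymous union that is
// a member of S, so they are variant members of S as well.
struct S {
  union {
    int x;
    double y;
  };
};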
@@ -928,9 +939,11 @@
else if (Constructor->isConstexpr())
// We may now know that the constructor is constexpr.
data().HasConstexprNonCopyMoveConstructor = true;
- } else if (isa<CXXDestructorDecl>(D))
+ } else if (isa<CXXDestructorDecl>(D)) {
SMKind |= SMF_Destructor;
- else if (D->isCopyAssignmentOperator())
+ if (!D->isTrivial() || D->getAccess() != AS_public || D->isDeleted())
+ data().HasIrrelevantDestructor = false;
+ } else if (D->isCopyAssignmentOperator())
SMKind |= SMF_CopyAssignment;
else if (D->isMoveAssignmentOperator())
SMKind |= SMF_MoveAssignment;
@@ -1018,13 +1031,9 @@
}
static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
- QualType T;
- if (isa<UsingShadowDecl>(Conv))
- Conv = cast<UsingShadowDecl>(Conv)->getTargetDecl();
- if (FunctionTemplateDecl *ConvTemp = dyn_cast<FunctionTemplateDecl>(Conv))
- T = ConvTemp->getTemplatedDecl()->getResultType();
- else
- T = cast<CXXConversionDecl>(Conv)->getConversionType();
+ QualType T =
+ cast<CXXConversionDecl>(Conv->getUnderlyingDecl()->getAsFunction())
+ ->getConversionType();
return Context.getCanonicalType(T);
}
@@ -1087,14 +1096,13 @@
}
// Collect information recursively from any base classes.
- for (CXXRecordDecl::base_class_iterator
- I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
- const RecordType *RT = I->getType()->getAs<RecordType>();
+ for (const auto &I : Record->bases()) {
+ const RecordType *RT = I.getType()->getAs<RecordType>();
if (!RT) continue;
AccessSpecifier BaseAccess
- = CXXRecordDecl::MergeAccess(Access, I->getAccessSpecifier());
- bool BaseInVirtual = InVirtual || I->isVirtual();
+ = CXXRecordDecl::MergeAccess(Access, I.getAccessSpecifier());
+ bool BaseInVirtual = InVirtual || I.isVirtual();
CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
CollectVisibleConversions(Context, Base, BaseInVirtual, BaseAccess,
@@ -1130,13 +1138,12 @@
HiddenTypes.insert(GetConversionType(Context, ConvI.getDecl()));
// Recursively collect conversions from base classes.
- for (CXXRecordDecl::base_class_iterator
- I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
- const RecordType *RT = I->getType()->getAs<RecordType>();
+ for (const auto &I : Record->bases()) {
+ const RecordType *RT = I.getType()->getAs<RecordType>();
if (!RT) continue;
CollectVisibleConversions(Context, cast<CXXRecordDecl>(RT->getDecl()),
- I->isVirtual(), I->getAccessSpecifier(),
+ I.isVirtual(), I.getAccessSpecifier(),
HiddenTypes, Output, VBaseCs, HiddenVBaseCs);
}
@@ -1317,11 +1324,9 @@
isDependentContext())
return false;
- for (CXXRecordDecl::base_class_const_iterator B = bases_begin(),
- BEnd = bases_end();
- B != BEnd; ++B) {
+ for (const auto &B : bases()) {
CXXRecordDecl *BaseDecl
- = cast<CXXRecordDecl>(B->getType()->getAs<RecordType>()->getDecl());
+ = cast<CXXRecordDecl>(B.getType()->getAs<RecordType>()->getDecl());
if (BaseDecl->isAbstract())
return true;
}
@@ -1383,9 +1388,8 @@
return MD;
}
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const RecordType *RT = I->getType()->getAs<RecordType>();
+ for (const auto &I : RD->bases()) {
+ const RecordType *RT = I.getType()->getAs<RecordType>();
if (!RT)
continue;
const CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
@@ -1404,17 +1408,14 @@
QualType T, TypeSourceInfo *TInfo,
StorageClass SC, bool isInline,
bool isConstexpr, SourceLocation EndLocation) {
- return new (C) CXXMethodDecl(CXXMethod, RD, StartLoc, NameInfo, T, TInfo,
- SC, isInline, isConstexpr,
- EndLocation);
+ return new (C, RD) CXXMethodDecl(CXXMethod, RD, StartLoc, NameInfo, T, TInfo,
+ SC, isInline, isConstexpr, EndLocation);
}
CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXMethodDecl));
- return new (Mem) CXXMethodDecl(CXXMethod, 0, SourceLocation(),
- DeclarationNameInfo(), QualType(),
- 0, SC_None, false, false,
- SourceLocation());
+ return new (C, ID) CXXMethodDecl(CXXMethod, 0, SourceLocation(),
+ DeclarationNameInfo(), QualType(), 0,
+ SC_None, false, false, SourceLocation());
}
bool CXXMethodDecl::isUsualDeallocationFunction() const {
@@ -1677,9 +1678,9 @@
CXXConstructorDecl *
CXXConstructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConstructorDecl));
- return new (Mem) CXXConstructorDecl(0, SourceLocation(),DeclarationNameInfo(),
- QualType(), 0, false, false, false,false);
+ return new (C, ID) CXXConstructorDecl(0, SourceLocation(),
+ DeclarationNameInfo(), QualType(),
+ 0, false, false, false, false);
}
CXXConstructorDecl *
@@ -1692,9 +1693,9 @@
assert(NameInfo.getName().getNameKind()
== DeclarationName::CXXConstructorName &&
"Name must refer to a constructor");
- return new (C) CXXConstructorDecl(RD, StartLoc, NameInfo, T, TInfo,
- isExplicit, isInline, isImplicitlyDeclared,
- isConstexpr);
+ return new (C, RD) CXXConstructorDecl(RD, StartLoc, NameInfo, T, TInfo,
+ isExplicit, isInline,
+ isImplicitlyDeclared, isConstexpr);
}
CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
@@ -1827,9 +1828,8 @@
CXXDestructorDecl *
CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXDestructorDecl));
- return new (Mem) CXXDestructorDecl(0, SourceLocation(), DeclarationNameInfo(),
- QualType(), 0, false, false);
+ return new (C, ID) CXXDestructorDecl(
+ 0, SourceLocation(), DeclarationNameInfo(), QualType(), 0, false, false);
}
CXXDestructorDecl *
@@ -1841,18 +1841,18 @@
assert(NameInfo.getName().getNameKind()
== DeclarationName::CXXDestructorName &&
"Name must refer to a destructor");
- return new (C) CXXDestructorDecl(RD, StartLoc, NameInfo, T, TInfo, isInline,
- isImplicitlyDeclared);
+ return new (C, RD) CXXDestructorDecl(RD, StartLoc, NameInfo, T, TInfo,
+ isInline, isImplicitlyDeclared);
}
void CXXConversionDecl::anchor() { }
CXXConversionDecl *
CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConversionDecl));
- return new (Mem) CXXConversionDecl(0, SourceLocation(), DeclarationNameInfo(),
- QualType(), 0, false, false, false,
- SourceLocation());
+ return new (C, ID) CXXConversionDecl(0, SourceLocation(),
+ DeclarationNameInfo(), QualType(),
+ 0, false, false, false,
+ SourceLocation());
}
CXXConversionDecl *
@@ -1865,9 +1865,9 @@
assert(NameInfo.getName().getNameKind()
== DeclarationName::CXXConversionFunctionName &&
"Name must refer to a conversion function");
- return new (C) CXXConversionDecl(RD, StartLoc, NameInfo, T, TInfo,
- isInline, isExplicit, isConstexpr,
- EndLocation);
+ return new (C, RD) CXXConversionDecl(RD, StartLoc, NameInfo, T, TInfo,
+ isInline, isExplicit, isConstexpr,
+ EndLocation);
}
bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
@@ -1883,13 +1883,13 @@
SourceLocation LangLoc,
LanguageIDs Lang,
bool HasBraces) {
- return new (C) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, HasBraces);
+ return new (C, DC) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, HasBraces);
}
-LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LinkageSpecDecl));
- return new (Mem) LinkageSpecDecl(0, SourceLocation(), SourceLocation(),
- lang_c, false);
+LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) LinkageSpecDecl(0, SourceLocation(), SourceLocation(),
+ lang_c, false);
}
void UsingDirectiveDecl::anchor() { }
@@ -1903,16 +1903,15 @@
DeclContext *CommonAncestor) {
if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Used))
Used = NS->getOriginalNamespace();
- return new (C) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc,
- IdentLoc, Used, CommonAncestor);
+ return new (C, DC) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc,
+ IdentLoc, Used, CommonAncestor);
}
-UsingDirectiveDecl *
-UsingDirectiveDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDirectiveDecl));
- return new (Mem) UsingDirectiveDecl(0, SourceLocation(), SourceLocation(),
- NestedNameSpecifierLoc(),
- SourceLocation(), 0, 0);
+UsingDirectiveDecl *UsingDirectiveDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) UsingDirectiveDecl(0, SourceLocation(), SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(), 0, 0);
}
NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
@@ -1922,8 +1921,6 @@
return cast_or_null<NamespaceDecl>(NominatedNamespace);
}
-void NamespaceDecl::anchor() { }
-
NamespaceDecl::NamespaceDecl(DeclContext *DC, bool Inline,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
@@ -1941,13 +1938,22 @@
bool Inline, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
NamespaceDecl *PrevDecl) {
- return new (C) NamespaceDecl(DC, Inline, StartLoc, IdLoc, Id, PrevDecl);
+ return new (C, DC) NamespaceDecl(DC, Inline, StartLoc, IdLoc, Id, PrevDecl);
}
NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceDecl));
- return new (Mem) NamespaceDecl(0, false, SourceLocation(), SourceLocation(),
- 0, 0);
+ return new (C, ID) NamespaceDecl(0, false, SourceLocation(), SourceLocation(),
+ 0, 0);
+}
+
+NamespaceDecl *NamespaceDecl::getNextRedeclaration() {
+ return RedeclLink.getNext();
+}
+NamespaceDecl *NamespaceDecl::getPreviousDeclImpl() {
+ return getPreviousDecl();
+}
+NamespaceDecl *NamespaceDecl::getMostRecentDeclImpl() {
+ return getMostRecentDecl();
}
void NamespaceAliasDecl::anchor() { }
@@ -1961,24 +1967,22 @@
NamedDecl *Namespace) {
if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Namespace))
Namespace = NS->getOriginalNamespace();
- return new (C) NamespaceAliasDecl(DC, UsingLoc, AliasLoc, Alias,
- QualifierLoc, IdentLoc, Namespace);
+ return new (C, DC) NamespaceAliasDecl(DC, UsingLoc, AliasLoc, Alias,
+ QualifierLoc, IdentLoc, Namespace);
}
NamespaceAliasDecl *
NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceAliasDecl));
- return new (Mem) NamespaceAliasDecl(0, SourceLocation(), SourceLocation(), 0,
- NestedNameSpecifierLoc(),
- SourceLocation(), 0);
+ return new (C, ID) NamespaceAliasDecl(0, SourceLocation(), SourceLocation(),
+ 0, NestedNameSpecifierLoc(),
+ SourceLocation(), 0);
}
void UsingShadowDecl::anchor() { }
UsingShadowDecl *
UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingShadowDecl));
- return new (Mem) UsingShadowDecl(0, SourceLocation(), 0, 0);
+ return new (C, ID) UsingShadowDecl(0, SourceLocation(), 0, 0);
}
UsingDecl *UsingShadowDecl::getUsingDecl() const {
@@ -2026,13 +2030,12 @@
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo,
bool HasTypename) {
- return new (C) UsingDecl(DC, UL, QualifierLoc, NameInfo, HasTypename);
+ return new (C, DC) UsingDecl(DC, UL, QualifierLoc, NameInfo, HasTypename);
}
UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDecl));
- return new (Mem) UsingDecl(0, SourceLocation(), NestedNameSpecifierLoc(),
- DeclarationNameInfo(), false);
+ return new (C, ID) UsingDecl(0, SourceLocation(), NestedNameSpecifierLoc(),
+ DeclarationNameInfo(), false);
}
SourceRange UsingDecl::getSourceRange() const {
@@ -2048,15 +2051,14 @@
SourceLocation UsingLoc,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo) {
- return new (C) UnresolvedUsingValueDecl(DC, C.DependentTy, UsingLoc,
- QualifierLoc, NameInfo);
+ return new (C, DC) UnresolvedUsingValueDecl(DC, C.DependentTy, UsingLoc,
+ QualifierLoc, NameInfo);
}
UnresolvedUsingValueDecl *
UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UnresolvedUsingValueDecl));
- return new (Mem) UnresolvedUsingValueDecl(0, QualType(), SourceLocation(),
- NestedNameSpecifierLoc(),
+ return new (C, ID) UnresolvedUsingValueDecl(0, QualType(), SourceLocation(),
+ NestedNameSpecifierLoc(),
DeclarationNameInfo());
}
@@ -2075,20 +2077,16 @@
NestedNameSpecifierLoc QualifierLoc,
SourceLocation TargetNameLoc,
DeclarationName TargetName) {
- return new (C) UnresolvedUsingTypenameDecl(DC, UsingLoc, TypenameLoc,
- QualifierLoc, TargetNameLoc,
- TargetName.getAsIdentifierInfo());
+ return new (C, DC) UnresolvedUsingTypenameDecl(
+ DC, UsingLoc, TypenameLoc, QualifierLoc, TargetNameLoc,
+ TargetName.getAsIdentifierInfo());
}
UnresolvedUsingTypenameDecl *
UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID,
- sizeof(UnresolvedUsingTypenameDecl));
- return new (Mem) UnresolvedUsingTypenameDecl(0, SourceLocation(),
- SourceLocation(),
- NestedNameSpecifierLoc(),
- SourceLocation(),
- 0);
+ return new (C, ID) UnresolvedUsingTypenameDecl(
+ 0, SourceLocation(), SourceLocation(), NestedNameSpecifierLoc(),
+ SourceLocation(), 0);
}
void StaticAssertDecl::anchor() { }
@@ -2099,15 +2097,29 @@
StringLiteral *Message,
SourceLocation RParenLoc,
bool Failed) {
- return new (C) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
- RParenLoc, Failed);
+ return new (C, DC) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
+ RParenLoc, Failed);
}
-StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
+StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(StaticAssertDecl));
- return new (Mem) StaticAssertDecl(0, SourceLocation(), 0, 0,
- SourceLocation(), false);
+ return new (C, ID) StaticAssertDecl(0, SourceLocation(), 0, 0,
+ SourceLocation(), false);
+}
+
+MSPropertyDecl *MSPropertyDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, DeclarationName N,
+ QualType T, TypeSourceInfo *TInfo,
+ SourceLocation StartL,
+ IdentifierInfo *Getter,
+ IdentifierInfo *Setter) {
+ return new (C, DC) MSPropertyDecl(DC, L, N, T, TInfo, StartL, Getter, Setter);
+}
+
+MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) MSPropertyDecl(0, SourceLocation(), DeclarationName(),
+ QualType(), 0, SourceLocation(), 0, 0);
}
static const char *getAccessName(AccessSpecifier AS) {
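
The recurring change in this file (and the ones below) swaps hand-rolled C.Allocate / AllocateDeserializedDecl calls for placement new forms that take the ASTContext plus either a parent DeclContext or a deserialization ID, optionally with extra trailing bytes. A rough sketch of that idiom against a toy arena allocator (the Arena and Node names are invented, not Clang's):

#include <cstddef>
#include <vector>

struct Arena {                          // stand-in for the ASTContext allocator
  std::vector<char *> Blocks;
  void *Allocate(std::size_t Size) {
    char *P = new char[Size];
    Blocks.push_back(P);
    return P;
  }
  ~Arena() {
    for (char *P : Blocks)
      delete[] P;
  }
};

struct Node {
  // Mirrors 'new (C, DC) FooDecl(...)': extra bytes may be requested for
  // trailing storage, as the FriendDecl change below does.
  void *operator new(std::size_t Size, Arena &A, std::size_t Extra = 0) {
    return A.Allocate(Size + Extra);
  }
  // Matching placement delete, only used if the constructor throws.
  void operator delete(void *, Arena &, std::size_t) {}
  int Value = 0;
};

int main() {
  Arena A;
  Node *N = new (A) Node();             // arena-allocated, freed with the arena
  return N->Value;
}
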
diff --git a/lib/AST/DeclFriend.cpp b/lib/AST/DeclFriend.cpp
index 1c639d6..02374c7 100644
--- a/lib/AST/DeclFriend.cpp
+++ b/lib/AST/DeclFriend.cpp
@@ -46,21 +46,17 @@
}
#endif
- std::size_t Size = sizeof(FriendDecl)
- + FriendTypeTPLists.size() * sizeof(TemplateParameterList*);
- void *Mem = C.Allocate(Size);
- FriendDecl *FD = new (Mem) FriendDecl(DC, L, Friend, FriendL,
- FriendTypeTPLists);
+ std::size_t Extra = FriendTypeTPLists.size() * sizeof(TemplateParameterList*);
+ FriendDecl *FD = new (C, DC, Extra) FriendDecl(DC, L, Friend, FriendL,
+ FriendTypeTPLists);
cast<CXXRecordDecl>(DC)->pushFriendDecl(FD);
return FD;
}
FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned FriendTypeNumTPLists) {
- std::size_t Size = sizeof(FriendDecl)
- + FriendTypeNumTPLists * sizeof(TemplateParameterList*);
- void *Mem = AllocateDeserializedDecl(C, ID, Size);
- return new (Mem) FriendDecl(EmptyShell(), FriendTypeNumTPLists);
+ std::size_t Extra = FriendTypeNumTPLists * sizeof(TemplateParameterList*);
+ return new (C, ID, Extra) FriendDecl(EmptyShell(), FriendTypeNumTPLists);
}
FriendDecl *CXXRecordDecl::getFirstFriend() const {
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index b2b5b70..c53dba3 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -112,11 +112,7 @@
if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(this)) {
// Also look into categories, including class extensions, looking
// for a user declared instance method.
- for (ObjCInterfaceDecl::visible_categories_iterator
- Cat = ID->visible_categories_begin(),
- CatEnd = ID->visible_categories_end();
- Cat != CatEnd;
- ++Cat) {
+ for (const auto *Cat : ID->visible_categories()) {
if (ObjCMethodDecl *MD = Cat->getInstanceMethod(Sel))
if (!MD->isImplicit())
return true;
@@ -125,8 +121,7 @@
// Also search through the categories looking for a 'readwrite' declaration
// of this property. If one is found, presumably a setter will be provided
// (properties declared in categories will not get auto-synthesized).
- for (ObjCContainerDecl::prop_iterator P = Cat->prop_begin(),
- E = Cat->prop_end(); P != E; ++P)
+ for (const auto *P : Cat->properties())
if (P->getIdentifier() == Property->getIdentifier()) {
if (P->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite)
return true;
@@ -135,13 +130,10 @@
}
// Also look into protocols, for a user declared instance method.
- for (ObjCInterfaceDecl::all_protocol_iterator P =
- ID->all_referenced_protocol_begin(),
- PE = ID->all_referenced_protocol_end(); P != PE; ++P) {
- ObjCProtocolDecl *Proto = (*P);
+ for (const auto *Proto : ID->all_referenced_protocols())
if (Proto->HasUserDeclaredSetterMethod(Property))
return true;
- }
+
// And in its super class.
ObjCInterfaceDecl *OSC = ID->getSuperClass();
while (OSC) {
@@ -151,11 +143,9 @@
}
}
if (const ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(this))
- for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
- E = PD->protocol_end(); PI != E; ++PI) {
- if ((*PI)->HasUserDeclaredSetterMethod(Property))
+ for (const auto *PI : PD->protocols())
+ if (PI->HasUserDeclaredSetterMethod(Property))
return true;
- }
return false;
}
@@ -209,29 +199,23 @@
break;
case Decl::ObjCProtocol: {
const ObjCProtocolDecl *PID = cast<ObjCProtocolDecl>(this);
- for (ObjCProtocolDecl::protocol_iterator I = PID->protocol_begin(),
- E = PID->protocol_end(); I != E; ++I)
- if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ for (const auto *I : PID->protocols())
+ if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
return P;
break;
}
case Decl::ObjCInterface: {
const ObjCInterfaceDecl *OID = cast<ObjCInterfaceDecl>(this);
// Look through categories (but not extensions).
- for (ObjCInterfaceDecl::visible_categories_iterator
- Cat = OID->visible_categories_begin(),
- CatEnd = OID->visible_categories_end();
- Cat != CatEnd; ++Cat) {
+ for (const auto *Cat : OID->visible_categories()) {
if (!Cat->IsClassExtension())
if (ObjCPropertyDecl *P = Cat->FindPropertyDeclaration(PropertyId))
return P;
}
// Look through protocols.
- for (ObjCInterfaceDecl::all_protocol_iterator
- I = OID->all_referenced_protocol_begin(),
- E = OID->all_referenced_protocol_end(); I != E; ++I)
- if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ for (const auto *I : OID->all_referenced_protocols())
+ if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
return P;
// Finally, check the super class.
@@ -243,11 +227,9 @@
const ObjCCategoryDecl *OCD = cast<ObjCCategoryDecl>(this);
// Look through protocols.
if (!OCD->IsClassExtension())
- for (ObjCCategoryDecl::protocol_iterator
- I = OCD->protocol_begin(), E = OCD->protocol_end(); I != E; ++I)
- if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
- return P;
-
+ for (const auto *I : OCD->protocols())
+ if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
+ return P;
break;
}
}
@@ -275,10 +257,8 @@
return PD;
// Look through protocols.
- for (ObjCInterfaceDecl::all_protocol_iterator
- I = all_referenced_protocol_begin(),
- E = all_referenced_protocol_end(); I != E; ++I)
- if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ for (const auto *I : all_referenced_protocols())
+ if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
return P;
return 0;
@@ -286,16 +266,12 @@
void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM,
PropertyDeclOrder &PO) const {
- for (ObjCContainerDecl::prop_iterator P = prop_begin(),
- E = prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = *P;
+ for (auto *Prop : properties()) {
PM[Prop->getIdentifier()] = Prop;
PO.push_back(Prop);
}
- for (ObjCInterfaceDecl::all_protocol_iterator
- PI = all_referenced_protocol_begin(),
- E = all_referenced_protocol_end(); PI != E; ++PI)
- (*PI)->collectPropertiesToImplement(PM, PO);
+ for (const auto *PI : all_referenced_protocols())
+ PI->collectPropertiesToImplement(PM, PO);
// Note, the properties declared only in class extensions are still copied
// into the main @interface's property list, and therefore we don't
// explicitly have to search class extension properties.
@@ -341,10 +317,7 @@
for (unsigned i = 0; i < ExtNum; i++) {
bool protocolExists = false;
ObjCProtocolDecl *ProtoInExtension = ExtList[i];
- for (all_protocol_iterator
- p = all_referenced_protocol_begin(),
- e = all_referenced_protocol_end(); p != e; ++p) {
- ObjCProtocolDecl *Proto = (*p);
+ for (auto *Proto : all_referenced_protocols()) {
if (C.ProtocolCompatibleWithProtocol(ProtoInExtension, Proto)) {
protocolExists = true;
break;
@@ -360,14 +333,116 @@
return;
// Merge ProtocolRefs into class's protocol list;
- for (all_protocol_iterator p = all_referenced_protocol_begin(),
- e = all_referenced_protocol_end(); p != e; ++p) {
- ProtocolRefs.push_back(*p);
+ for (auto *P : all_referenced_protocols()) {
+ ProtocolRefs.push_back(P);
}
data().AllReferencedProtocols.set(ProtocolRefs.data(), ProtocolRefs.size(),C);
}
+const ObjCInterfaceDecl *
+ObjCInterfaceDecl::findInterfaceWithDesignatedInitializers() const {
+ const ObjCInterfaceDecl *IFace = this;
+ while (IFace) {
+ if (IFace->hasDesignatedInitializers())
+ return IFace;
+ if (!IFace->inheritsDesignatedInitializers())
+ break;
+ IFace = IFace->getSuperClass();
+ }
+ return 0;
+}
+
+static bool isIntroducingInitializers(const ObjCInterfaceDecl *D) {
+ for (const auto *MD : D->instance_methods()) {
+ if (MD->getMethodFamily() == OMF_init && !MD->isOverriding())
+ return true;
+ }
+ for (const auto *Ext : D->visible_extensions()) {
+ for (const auto *MD : Ext->instance_methods()) {
+ if (MD->getMethodFamily() == OMF_init && !MD->isOverriding())
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ObjCInterfaceDecl::inheritsDesignatedInitializers() const {
+ switch (data().InheritedDesignatedInitializers) {
+ case DefinitionData::IDI_Inherited:
+ return true;
+ case DefinitionData::IDI_NotInherited:
+ return false;
+ case DefinitionData::IDI_Unknown: {
+ // If the class introduced initializers we conservatively assume that we
+ // don't know if any of them is a designated initializer to avoid possible
+ // misleading warnings.
+ if (isIntroducingInitializers(this)) {
+ data().InheritedDesignatedInitializers = DefinitionData::IDI_NotInherited;
+ return false;
+ } else {
+ data().InheritedDesignatedInitializers = DefinitionData::IDI_Inherited;
+ return true;
+ }
+ }
+ }
+
+ llvm_unreachable("unexpected InheritedDesignatedInitializers value");
+}
+
+void ObjCInterfaceDecl::getDesignatedInitializers(
+ llvm::SmallVectorImpl<const ObjCMethodDecl *> &Methods) const {
+ // Check for a complete definition and recover if not so.
+ if (!isThisDeclarationADefinition())
+ return;
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ const ObjCInterfaceDecl *IFace= findInterfaceWithDesignatedInitializers();
+ if (!IFace)
+ return;
+
+ for (const auto *MD : IFace->instance_methods())
+ if (MD->isThisDeclarationADesignatedInitializer())
+ Methods.push_back(MD);
+ for (const auto *Ext : IFace->visible_extensions()) {
+ for (const auto *MD : Ext->instance_methods())
+ if (MD->isThisDeclarationADesignatedInitializer())
+ Methods.push_back(MD);
+ }
+}
+
+bool ObjCInterfaceDecl::isDesignatedInitializer(Selector Sel,
+ const ObjCMethodDecl **InitMethod) const {
+ // Check for a complete definition and recover if not so.
+ if (!isThisDeclarationADefinition())
+ return false;
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ const ObjCInterfaceDecl *IFace= findInterfaceWithDesignatedInitializers();
+ if (!IFace)
+ return false;
+
+ if (const ObjCMethodDecl *MD = IFace->getInstanceMethod(Sel)) {
+ if (MD->isThisDeclarationADesignatedInitializer()) {
+ if (InitMethod)
+ *InitMethod = MD;
+ return true;
+ }
+ }
+ for (const auto *Ext : IFace->visible_extensions()) {
+ if (const ObjCMethodDecl *MD = Ext->getInstanceMethod(Sel)) {
+ if (MD->isThisDeclarationADesignatedInitializer()) {
+ if (InitMethod)
+ *InitMethod = MD;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
void ObjCInterfaceDecl::allocateDefinitionData() {
assert(!hasDefinition() && "ObjC class already has a definition");
Data.setPointer(new (getASTContext()) DefinitionData());
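
inheritsDesignatedInitializers() above caches its answer in the IDI_Unknown / IDI_Inherited / IDI_NotInherited tri-state so the walk over initializers runs at most once per class. A minimal sketch of that memoization pattern in isolation (names invented, stand-in predicate):

#include <cassert>

class InheritanceQuery {
  enum State { Unknown, Yes, No };
  State Cached = Unknown;
  bool compute() const { return true; }  // stand-in for the real check
public:
  bool get() {
    if (Cached == Unknown)
      Cached = compute() ? Yes : No;     // computed once, then reused
    return Cached == Yes;
  }
};

int main() {
  InheritanceQuery Q;
  assert(Q.get() == Q.get());            // second call hits the cache
  return 0;
}
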
@@ -382,9 +457,8 @@
allocateDefinitionData();
// Update all of the declarations with a pointer to the definition.
- for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
- RD != RDEnd; ++RD) {
- if (*RD != this)
+ for (auto RD : redecls()) {
+ if (RD != this)
RD->Data = Data;
}
}
@@ -405,10 +479,7 @@
return I;
}
- for (ObjCInterfaceDecl::visible_extensions_iterator
- Ext = ClassDecl->visible_extensions_begin(),
- ExtEnd = ClassDecl->visible_extensions_end();
- Ext != ExtEnd; ++Ext) {
+ for (const auto *Ext : ClassDecl->visible_extensions()) {
if (ObjCIvarDecl *I = Ext->getIvarDecl(ID)) {
clsDeclared = ClassDecl;
return I;
@@ -443,11 +514,9 @@
ObjCProtocolDecl *
ObjCInterfaceDecl::lookupNestedProtocol(IdentifierInfo *Name) {
- for (ObjCInterfaceDecl::all_protocol_iterator P =
- all_referenced_protocol_begin(), PE = all_referenced_protocol_end();
- P != PE; ++P)
- if ((*P)->lookupProtocolNamed(Name))
- return (*P);
+ for (auto *P : all_referenced_protocols())
+ if (P->lookupProtocolNamed(Name))
+ return P;
ObjCInterfaceDecl *SuperClass = getSuperClass();
return SuperClass ? SuperClass->lookupNestedProtocol(Name) : NULL;
}
@@ -457,9 +526,11 @@
/// When argument category "C" is specified, any implicit method found
/// in this category is ignored.
ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
- bool isInstance,
- bool shallowCategoryLookup,
- const ObjCCategoryDecl *C) const {
+ bool isInstance,
+ bool shallowCategoryLookup,
+ bool followSuper,
+ const ObjCCategoryDecl *C) const
+{
// FIXME: Should make sure no callers ever do this.
if (!hasDefinition())
return 0;
@@ -470,24 +541,19 @@
if (data().ExternallyCompleted)
LoadExternalDefinition();
- while (ClassDecl != NULL) {
+ while (ClassDecl) {
if ((MethodDecl = ClassDecl->getMethod(Sel, isInstance)))
return MethodDecl;
// Didn't find one yet - look through protocols.
- for (ObjCInterfaceDecl::protocol_iterator I = ClassDecl->protocol_begin(),
- E = ClassDecl->protocol_end();
- I != E; ++I)
- if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ for (const auto *I : ClassDecl->protocols())
+ if ((MethodDecl = I->lookupMethod(Sel, isInstance)))
return MethodDecl;
// Didn't find one yet - now look through categories.
- for (ObjCInterfaceDecl::visible_categories_iterator
- Cat = ClassDecl->visible_categories_begin(),
- CatEnd = ClassDecl->visible_categories_end();
- Cat != CatEnd; ++Cat) {
+ for (const auto *Cat : ClassDecl->visible_categories()) {
if ((MethodDecl = Cat->getMethod(Sel, isInstance)))
- if (C != (*Cat) || !MethodDecl->isImplicit())
+ if (C != Cat || !MethodDecl->isImplicit())
return MethodDecl;
if (!shallowCategoryLookup) {
@@ -497,11 +563,15 @@
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end(); I != E; ++I)
if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
- if (C != (*Cat) || !MethodDecl->isImplicit())
+ if (C != Cat || !MethodDecl->isImplicit())
return MethodDecl;
}
}
-
+
+ if (!followSuper)
+ return NULL;
+
+ // Get the super class (if any).
ClassDecl = ClassDecl->getSuperClass();
}
return NULL;
@@ -550,31 +620,38 @@
// ObjCMethodDecl
//===----------------------------------------------------------------------===//
-ObjCMethodDecl *ObjCMethodDecl::Create(ASTContext &C,
- SourceLocation beginLoc,
- SourceLocation endLoc,
- Selector SelInfo, QualType T,
- TypeSourceInfo *ResultTInfo,
- DeclContext *contextDecl,
- bool isInstance,
- bool isVariadic,
- bool isPropertyAccessor,
- bool isImplicitlyDeclared,
- bool isDefined,
- ImplementationControl impControl,
- bool HasRelatedResultType) {
- return new (C) ObjCMethodDecl(beginLoc, endLoc,
- SelInfo, T, ResultTInfo, contextDecl,
- isInstance, isVariadic, isPropertyAccessor,
- isImplicitlyDeclared, isDefined,
- impControl,
- HasRelatedResultType);
+ObjCMethodDecl *ObjCMethodDecl::Create(
+ ASTContext &C, SourceLocation beginLoc, SourceLocation endLoc,
+ Selector SelInfo, QualType T, TypeSourceInfo *ReturnTInfo,
+ DeclContext *contextDecl, bool isInstance, bool isVariadic,
+ bool isPropertyAccessor, bool isImplicitlyDeclared, bool isDefined,
+ ImplementationControl impControl, bool HasRelatedResultType) {
+ return new (C, contextDecl) ObjCMethodDecl(
+ beginLoc, endLoc, SelInfo, T, ReturnTInfo, contextDecl, isInstance,
+ isVariadic, isPropertyAccessor, isImplicitlyDeclared, isDefined,
+ impControl, HasRelatedResultType);
}
ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCMethodDecl));
- return new (Mem) ObjCMethodDecl(SourceLocation(), SourceLocation(),
- Selector(), QualType(), 0, 0);
+ return new (C, ID) ObjCMethodDecl(SourceLocation(), SourceLocation(),
+ Selector(), QualType(), 0, 0);
+}
+
+bool ObjCMethodDecl::isThisDeclarationADesignatedInitializer() const {
+ return getMethodFamily() == OMF_init &&
+ hasAttr<ObjCDesignatedInitializerAttr>();
+}
+
+bool ObjCMethodDecl::isDesignatedInitializerForTheInterface(
+ const ObjCMethodDecl **InitMethod) const {
+ if (getMethodFamily() != OMF_init)
+ return false;
+ const DeclContext *DC = getDeclContext();
+ if (isa<ObjCProtocolDecl>(DC))
+ return false;
+ if (const ObjCInterfaceDecl *ID = getClassInterface())
+ return ID->isDesignatedInitializer(getSelector(), InitMethod);
+ return false;
}
Stmt *ObjCMethodDecl::getBody() const {
@@ -730,7 +807,7 @@
// init only has a conventional meaning for an instance method, and
// it has to return an object.
case OMF_init:
- if (!isInstanceMethod() || !getResultType()->isObjCObjectPointerType())
+ if (!isInstanceMethod() || !getReturnType()->isObjCObjectPointerType())
family = OMF_None;
break;
@@ -740,7 +817,7 @@
case OMF_copy:
case OMF_mutableCopy:
case OMF_new:
- if (!getResultType()->isObjCObjectPointerType())
+ if (!getReturnType()->isObjCObjectPointerType())
family = OMF_None;
break;
@@ -757,15 +834,14 @@
break;
case OMF_performSelector:
- if (!isInstanceMethod() ||
- !getResultType()->isObjCIdType())
+ if (!isInstanceMethod() || !getReturnType()->isObjCIdType())
family = OMF_None;
else {
unsigned noParams = param_size();
if (noParams < 1 || noParams > 3)
family = OMF_None;
else {
- ObjCMethodDecl::arg_type_iterator it = arg_type_begin();
+ ObjCMethodDecl::param_type_iterator it = param_type_begin();
QualType ArgT = (*it);
if (!ArgT->isObjCSelType()) {
family = OMF_None;
@@ -838,7 +914,7 @@
setSelfDecl(self);
if (selfIsConsumed)
- self->addAttr(new (Context) NSConsumedAttr(SourceLocation(), Context));
+ self->addAttr(NSConsumedAttr::CreateImplicit(Context));
if (selfIsPseudoStrong)
self->setARCPseudoStrong(true);
@@ -855,8 +931,8 @@
return CD->getClassInterface();
if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(getDeclContext()))
return IMD->getClassInterface();
-
- assert(!isa<ObjCProtocolDecl>(getDeclContext()) && "It's a protocol method");
+ if (isa<ObjCProtocolDecl>(getDeclContext()))
+ return 0;
llvm_unreachable("unknown method context");
}
@@ -886,10 +962,8 @@
return;
}
- for (ObjCCategoryDecl::protocol_iterator P = Category->protocol_begin(),
- PEnd = Category->protocol_end();
- P != PEnd; ++P)
- CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper);
+ for (const auto *P : Category->protocols())
+ CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
return;
}
@@ -906,26 +980,17 @@
}
if (const ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)){
- for (ObjCProtocolDecl::protocol_iterator P = Protocol->protocol_begin(),
- PEnd = Protocol->protocol_end();
- P != PEnd; ++P)
- CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper);
+ for (const auto *P : Protocol->protocols())
+ CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
}
if (const ObjCInterfaceDecl *
Interface = dyn_cast<ObjCInterfaceDecl>(Container)) {
- for (ObjCInterfaceDecl::protocol_iterator P = Interface->protocol_begin(),
- PEnd = Interface->protocol_end();
- P != PEnd; ++P)
- CollectOverriddenMethodsRecurse(*P, Method, Methods, MovedToSuper);
+ for (const auto *P : Interface->protocols())
+ CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
- for (ObjCInterfaceDecl::known_categories_iterator
- Cat = Interface->known_categories_begin(),
- CatEnd = Interface->known_categories_end();
- Cat != CatEnd; ++Cat) {
- CollectOverriddenMethodsRecurse(*Cat, Method, Methods,
- MovedToSuper);
- }
+ for (const auto *Cat : Interface->known_categories())
+ CollectOverriddenMethodsRecurse(Cat, Method, Methods, MovedToSuper);
if (const ObjCInterfaceDecl *Super = Interface->getSuperClass())
return CollectOverriddenMethodsRecurse(Super, Method, Methods,
@@ -1016,13 +1081,11 @@
bool IsGetter = (NumArgs == 0);
- for (ObjCContainerDecl::prop_iterator I = Container->prop_begin(),
- E = Container->prop_end();
- I != E; ++I) {
- Selector NextSel = IsGetter ? (*I)->getGetterName()
- : (*I)->getSetterName();
+ for (const auto *I : Container->properties()) {
+ Selector NextSel = IsGetter ? I->getGetterName()
+ : I->getSetterName();
if (NextSel == Sel)
- return *I;
+ return I;
}
llvm_unreachable("Marked as a property accessor but no property found!");
@@ -1055,19 +1118,18 @@
ObjCInterfaceDecl *PrevDecl,
SourceLocation ClassLoc,
bool isInternal){
- ObjCInterfaceDecl *Result = new (C) ObjCInterfaceDecl(DC, atLoc, Id, ClassLoc,
- PrevDecl, isInternal);
+ ObjCInterfaceDecl *Result = new (C, DC)
+ ObjCInterfaceDecl(DC, atLoc, Id, ClassLoc, PrevDecl, isInternal);
Result->Data.setInt(!C.getLangOpts().Modules);
C.getObjCInterfaceType(Result, PrevDecl);
return Result;
}
-ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(ASTContext &C,
+ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCInterfaceDecl));
- ObjCInterfaceDecl *Result = new (Mem) ObjCInterfaceDecl(0, SourceLocation(),
- 0, SourceLocation(),
- 0, false);
+ ObjCInterfaceDecl *Result = new (C, ID) ObjCInterfaceDecl(0, SourceLocation(),
+ 0, SourceLocation(),
+ 0, false);
Result->Data.setInt(!C.getLangOpts().Modules);
return Result;
}
@@ -1103,6 +1165,23 @@
data().ExternallyCompleted = true;
}
+void ObjCInterfaceDecl::setHasDesignatedInitializers() {
+ // Check for a complete definition and recover if not so.
+ if (!isThisDeclarationADefinition())
+ return;
+ data().HasDesignatedInitializers = true;
+}
+
+bool ObjCInterfaceDecl::hasDesignatedInitializers() const {
+ // Check for a complete definition and recover if not so.
+ if (!isThisDeclarationADefinition())
+ return false;
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().HasDesignatedInitializers;
+}
+
ObjCImplementationDecl *ObjCInterfaceDecl::getImplementation() const {
if (const ObjCInterfaceDecl *Def = getDefinition()) {
if (data().ExternallyCompleted)
@@ -1157,10 +1236,7 @@
curIvar->setNextIvar(*I);
}
- for (ObjCInterfaceDecl::known_extensions_iterator
- Ext = known_extensions_begin(),
- ExtEnd = known_extensions_end();
- Ext != ExtEnd; ++Ext) {
+ for (const auto *Ext : known_extensions()) {
if (!Ext->ivar_empty()) {
ObjCCategoryDecl::ivar_iterator
I = Ext->ivar_begin(),
@@ -1184,19 +1260,17 @@
data().IvarListMissingImplementation = false;
if (!ImplDecl->ivar_empty()) {
SmallVector<SynthesizeIvarChunk, 16> layout;
- for (ObjCImplementationDecl::ivar_iterator I = ImplDecl->ivar_begin(),
- E = ImplDecl->ivar_end(); I != E; ++I) {
- ObjCIvarDecl *IV = *I;
+ for (auto *IV : ImplDecl->ivars()) {
if (IV->getSynthesize() && !IV->isInvalidDecl()) {
layout.push_back(SynthesizeIvarChunk(
IV->getASTContext().getTypeSize(IV->getType()), IV));
continue;
}
if (!data().IvarList)
- data().IvarList = *I;
+ data().IvarList = IV;
else
- curIvar->setNextIvar(*I);
- curIvar = *I;
+ curIvar->setNextIvar(IV);
+ curIvar = IV;
}
if (!layout.empty()) {
@@ -1228,23 +1302,16 @@
if (data().ExternallyCompleted)
LoadExternalDefinition();
- for (visible_categories_iterator Cat = visible_categories_begin(),
- CatEnd = visible_categories_end();
- Cat != CatEnd;
- ++Cat) {
+ for (auto *Cat : visible_categories())
if (Cat->getIdentifier() == CategoryId)
- return *Cat;
- }
+ return Cat;
return 0;
}
ObjCMethodDecl *
ObjCInterfaceDecl::getCategoryInstanceMethod(Selector Sel) const {
- for (visible_categories_iterator Cat = visible_categories_begin(),
- CatEnd = visible_categories_end();
- Cat != CatEnd;
- ++Cat) {
+ for (const auto *Cat : visible_categories()) {
if (ObjCCategoryImplDecl *Impl = Cat->getImplementation())
if (ObjCMethodDecl *MD = Impl->getInstanceMethod(Sel))
return MD;
@@ -1254,10 +1321,7 @@
}
ObjCMethodDecl *ObjCInterfaceDecl::getCategoryClassMethod(Selector Sel) const {
- for (visible_categories_iterator Cat = visible_categories_begin(),
- CatEnd = visible_categories_end();
- Cat != CatEnd;
- ++Cat) {
+ for (const auto *Cat : visible_categories()) {
if (ObjCCategoryImplDecl *Impl = Cat->getImplementation())
if (ObjCMethodDecl *MD = Impl->getClassMethod(Sel))
return MD;
@@ -1277,9 +1341,8 @@
ObjCInterfaceDecl *IDecl = this;
// 1st, look up the class.
- for (ObjCInterfaceDecl::protocol_iterator
- PI = IDecl->protocol_begin(), E = IDecl->protocol_end(); PI != E; ++PI){
- if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI))
+ for (auto *PI : IDecl->protocols()){
+ if (getASTContext().ProtocolCompatibleWithProtocol(lProto, PI))
return true;
// This is dubious and is added to be compatible with gcc. In gcc, it is
// also allowed assigning a protocol-qualified 'id' type to a LHS object
@@ -1288,20 +1351,15 @@
// FIXME: Treat this as an extension, and flag this as an error when GCC
// extensions are not enabled.
if (RHSIsQualifiedID &&
- getASTContext().ProtocolCompatibleWithProtocol(*PI, lProto))
+ getASTContext().ProtocolCompatibleWithProtocol(PI, lProto))
return true;
}
// 2nd, look up the category.
if (lookupCategory)
- for (visible_categories_iterator Cat = visible_categories_begin(),
- CatEnd = visible_categories_end();
- Cat != CatEnd;
- ++Cat) {
- for (ObjCCategoryDecl::protocol_iterator PI = Cat->protocol_begin(),
- E = Cat->protocol_end();
- PI != E; ++PI)
- if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI))
+ for (const auto *Cat : visible_categories()) {
+ for (auto *PI : Cat->protocols())
+ if (getASTContext().ProtocolCompatibleWithProtocol(lProto, PI))
return true;
}
@@ -1325,8 +1383,7 @@
SourceLocation IdLoc, IdentifierInfo *Id,
QualType T, TypeSourceInfo *TInfo,
AccessControl ac, Expr *BW,
- bool synthesized,
- bool backingIvarReferencedInAccessor) {
+ bool synthesized) {
if (DC) {
// Ivar's can only appear in interfaces, implementations (via synthesized
// properties), and class extensions (via direct declaration, or synthesized
@@ -1353,14 +1410,13 @@
ID->setIvarList(0);
}
- return new (C) ObjCIvarDecl(DC, StartLoc, IdLoc, Id, T, TInfo,
- ac, BW, synthesized, backingIvarReferencedInAccessor);
+ return new (C, DC) ObjCIvarDecl(DC, StartLoc, IdLoc, Id, T, TInfo, ac, BW,
+ synthesized);
}
ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCIvarDecl));
- return new (Mem) ObjCIvarDecl(0, SourceLocation(), SourceLocation(), 0,
- QualType(), 0, ObjCIvarDecl::None, 0, false, false);
+ return new (C, ID) ObjCIvarDecl(0, SourceLocation(), SourceLocation(), 0,
+ QualType(), 0, ObjCIvarDecl::None, 0, false);
}
const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
@@ -1397,14 +1453,13 @@
*ObjCAtDefsFieldDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, QualType T, Expr *BW) {
- return new (C) ObjCAtDefsFieldDecl(DC, StartLoc, IdLoc, Id, T, BW);
+ return new (C, DC) ObjCAtDefsFieldDecl(DC, StartLoc, IdLoc, Id, T, BW);
}
-ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C,
+ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCAtDefsFieldDecl));
- return new (Mem) ObjCAtDefsFieldDecl(0, SourceLocation(), SourceLocation(),
- 0, QualType(), 0);
+ return new (C, ID) ObjCAtDefsFieldDecl(0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0);
}
//===----------------------------------------------------------------------===//
@@ -1429,17 +1484,16 @@
SourceLocation nameLoc,
SourceLocation atStartLoc,
ObjCProtocolDecl *PrevDecl) {
- ObjCProtocolDecl *Result
- = new (C) ObjCProtocolDecl(DC, Id, nameLoc, atStartLoc, PrevDecl);
+ ObjCProtocolDecl *Result =
+ new (C, DC) ObjCProtocolDecl(DC, Id, nameLoc, atStartLoc, PrevDecl);
Result->Data.setInt(!C.getLangOpts().Modules);
return Result;
}
-ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C,
+ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCProtocolDecl));
- ObjCProtocolDecl *Result = new (Mem) ObjCProtocolDecl(0, 0, SourceLocation(),
- SourceLocation(), 0);
+ ObjCProtocolDecl *Result =
+ new (C, ID) ObjCProtocolDecl(0, 0, SourceLocation(), SourceLocation(), 0);
Result->Data.setInt(!C.getLangOpts().Modules);
return Result;
}
@@ -1450,8 +1504,8 @@
if (Name == getIdentifier())
return PDecl;
- for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
- if ((PDecl = (*I)->lookupProtocolNamed(Name)))
+ for (auto *I : protocols())
+ if ((PDecl = I->lookupProtocolNamed(Name)))
return PDecl;
return NULL;
@@ -1472,8 +1526,8 @@
if ((MethodDecl = getMethod(Sel, isInstance)))
return MethodDecl;
- for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
- if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ for (const auto *I : protocols())
+ if ((MethodDecl = I->lookupMethod(Sel, isInstance)))
return MethodDecl;
return NULL;
}
@@ -1488,8 +1542,7 @@
allocateDefinitionData();
// Update all of the declarations with a pointer to the definition.
- for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
- RD != RDEnd; ++RD)
+ for (auto RD : redecls())
RD->Data = this->Data;
}
@@ -1497,17 +1550,14 @@
PropertyDeclOrder &PO) const {
if (const ObjCProtocolDecl *PDecl = getDefinition()) {
- for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
- E = PDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = *P;
+ for (auto *Prop : PDecl->properties()) {
// Insert into PM if not there already.
PM.insert(std::make_pair(Prop->getIdentifier(), Prop));
PO.push_back(Prop);
}
// Scan through protocol's protocols.
- for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
- E = PDecl->protocol_end(); PI != E; ++PI)
- (*PI)->collectPropertiesToImplement(PM, PO);
+ for (const auto *PI : PDecl->protocols())
+ PI->collectPropertiesToImplement(PM, PO);
}
}
@@ -1517,9 +1567,7 @@
ProtocolPropertyMap &PM) const {
if (const ObjCProtocolDecl *PDecl = getDefinition()) {
bool MatchFound = false;
- for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
- E = PDecl->prop_end(); P != E; ++P) {
- ObjCPropertyDecl *Prop = *P;
+ for (auto *Prop : PDecl->properties()) {
if (Prop == Property)
continue;
if (Prop->getIdentifier() == Property->getIdentifier()) {
@@ -1530,9 +1578,8 @@
}
// Scan through protocol's protocols which did not have a matching property.
if (!MatchFound)
- for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
- E = PDecl->protocol_end(); PI != E; ++PI)
- (*PI)->collectInheritedProtocolProperties(Property, PM);
+ for (const auto *PI : PDecl->protocols())
+ PI->collectInheritedProtocolProperties(Property, PM);
}
}
@@ -1543,17 +1590,16 @@
void ObjCCategoryDecl::anchor() { }
ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation AtLoc,
+ SourceLocation AtLoc,
SourceLocation ClassNameLoc,
SourceLocation CategoryNameLoc,
IdentifierInfo *Id,
ObjCInterfaceDecl *IDecl,
SourceLocation IvarLBraceLoc,
SourceLocation IvarRBraceLoc) {
- ObjCCategoryDecl *CatDecl = new (C) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc,
- CategoryNameLoc, Id,
- IDecl,
- IvarLBraceLoc, IvarRBraceLoc);
+ ObjCCategoryDecl *CatDecl =
+ new (C, DC) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc, CategoryNameLoc, Id,
+ IDecl, IvarLBraceLoc, IvarRBraceLoc);
if (IDecl) {
// Link this category into its class's category list.
CatDecl->NextClassCategory = IDecl->getCategoryListRaw();
@@ -1567,11 +1613,10 @@
return CatDecl;
}
-ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C,
+ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryDecl));
- return new (Mem) ObjCCategoryDecl(0, SourceLocation(), SourceLocation(),
- SourceLocation(), 0, 0);
+ return new (C, ID) ObjCCategoryDecl(0, SourceLocation(), SourceLocation(),
+ SourceLocation(), 0, 0);
}
ObjCCategoryImplDecl *ObjCCategoryDecl::getImplementation() const {
@@ -1599,15 +1644,14 @@
SourceLocation CategoryNameLoc) {
if (ClassInterface && ClassInterface->hasDefinition())
ClassInterface = ClassInterface->getDefinition();
- return new (C) ObjCCategoryImplDecl(DC, Id, ClassInterface,
- nameLoc, atStartLoc, CategoryNameLoc);
+ return new (C, DC) ObjCCategoryImplDecl(DC, Id, ClassInterface, nameLoc,
+ atStartLoc, CategoryNameLoc);
}
ObjCCategoryImplDecl *ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryImplDecl));
- return new (Mem) ObjCCategoryImplDecl(0, 0, 0, SourceLocation(),
- SourceLocation(), SourceLocation());
+ return new (C, ID) ObjCCategoryImplDecl(0, 0, 0, SourceLocation(),
+ SourceLocation(), SourceLocation());
}
ObjCCategoryDecl *ObjCCategoryImplDecl::getCategoryDecl() const {
@@ -1649,12 +1693,10 @@
///
ObjCPropertyImplDecl *ObjCImplDecl::
FindPropertyImplIvarDecl(IdentifierInfo *ivarId) const {
- for (propimpl_iterator i = propimpl_begin(), e = propimpl_end(); i != e; ++i){
- ObjCPropertyImplDecl *PID = *i;
+ for (auto *PID : property_impls())
if (PID->getPropertyIvarDecl() &&
PID->getPropertyIvarDecl()->getIdentifier() == ivarId)
return PID;
- }
return 0;
}
@@ -1664,11 +1706,9 @@
///
ObjCPropertyImplDecl *ObjCImplDecl::
FindPropertyImplDecl(IdentifierInfo *Id) const {
- for (propimpl_iterator i = propimpl_begin(), e = propimpl_end(); i != e; ++i){
- ObjCPropertyImplDecl *PID = *i;
+ for (auto *PID : property_impls())
if (PID->getPropertyDecl()->getIdentifier() == Id)
return PID;
- }
return 0;
}
@@ -1695,16 +1735,15 @@
SourceLocation IvarRBraceLoc) {
if (ClassInterface && ClassInterface->hasDefinition())
ClassInterface = ClassInterface->getDefinition();
- return new (C) ObjCImplementationDecl(DC, ClassInterface, SuperDecl,
- nameLoc, atStartLoc, superLoc,
- IvarLBraceLoc, IvarRBraceLoc);
+ return new (C, DC) ObjCImplementationDecl(DC, ClassInterface, SuperDecl,
+ nameLoc, atStartLoc, superLoc,
+ IvarLBraceLoc, IvarRBraceLoc);
}
ObjCImplementationDecl *
ObjCImplementationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCImplementationDecl));
- return new (Mem) ObjCImplementationDecl(0, 0, 0, SourceLocation(),
- SourceLocation());
+ return new (C, ID) ObjCImplementationDecl(0, 0, 0, SourceLocation(),
+ SourceLocation());
}
void ObjCImplementationDecl::setIvarInitializers(ASTContext &C,
@@ -1737,13 +1776,12 @@
SourceLocation L,
IdentifierInfo *Id,
ObjCInterfaceDecl* AliasedClass) {
- return new (C) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass);
+ return new (C, DC) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass);
}
ObjCCompatibleAliasDecl *
ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCompatibleAliasDecl));
- return new (Mem) ObjCCompatibleAliasDecl(0, SourceLocation(), 0, 0);
+ return new (C, ID) ObjCCompatibleAliasDecl(0, SourceLocation(), 0, 0);
}
//===----------------------------------------------------------------------===//
@@ -1759,15 +1797,13 @@
SourceLocation LParenLoc,
TypeSourceInfo *T,
PropertyControl propControl) {
- return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T);
+ return new (C, DC) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T);
}
-ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C,
+ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void * Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyDecl));
- return new (Mem) ObjCPropertyDecl(0, SourceLocation(), 0, SourceLocation(),
- SourceLocation(),
- 0);
+ return new (C, ID) ObjCPropertyDecl(0, SourceLocation(), 0, SourceLocation(),
+ SourceLocation(), 0);
}
//===----------------------------------------------------------------------===//
@@ -1782,15 +1818,14 @@
Kind PK,
ObjCIvarDecl *ivar,
SourceLocation ivarLoc) {
- return new (C) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar,
- ivarLoc);
+ return new (C, DC) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar,
+ ivarLoc);
}
-ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C,
+ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyImplDecl));
- return new (Mem) ObjCPropertyImplDecl(0, SourceLocation(), SourceLocation(),
- 0, Dynamic, 0, SourceLocation());
+ return new (C, ID) ObjCPropertyImplDecl(0, SourceLocation(), SourceLocation(),
+ 0, Dynamic, 0, SourceLocation());
}
SourceRange ObjCPropertyImplDecl::getSourceRange() const {
diff --git a/lib/AST/DeclOpenMP.cpp b/lib/AST/DeclOpenMP.cpp
index 0d195f7..37d4ae2 100644
--- a/lib/AST/DeclOpenMP.cpp
+++ b/lib/AST/DeclOpenMP.cpp
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclBase.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/Expr.h"
@@ -29,12 +29,8 @@
DeclContext *DC,
SourceLocation L,
ArrayRef<Expr *> VL) {
- unsigned Size = sizeof(OMPThreadPrivateDecl) +
- (VL.size() * sizeof(Expr *));
-
- void *Mem = C.Allocate(Size, llvm::alignOf<OMPThreadPrivateDecl>());
- OMPThreadPrivateDecl *D = new (Mem) OMPThreadPrivateDecl(OMPThreadPrivate,
- DC, L);
+ OMPThreadPrivateDecl *D = new (C, DC, VL.size() * sizeof(Expr *))
+ OMPThreadPrivateDecl(OMPThreadPrivate, DC, L);
D->NumVars = VL.size();
D->setVars(VL);
return D;
@@ -43,11 +39,8 @@
OMPThreadPrivateDecl *OMPThreadPrivateDecl::CreateDeserialized(ASTContext &C,
unsigned ID,
unsigned N) {
- unsigned Size = sizeof(OMPThreadPrivateDecl) + (N * sizeof(Expr *));
-
- void *Mem = AllocateDeserializedDecl(C, ID, Size);
- OMPThreadPrivateDecl *D = new (Mem) OMPThreadPrivateDecl(OMPThreadPrivate,
- 0, SourceLocation());
+ OMPThreadPrivateDecl *D = new (C, ID, N * sizeof(Expr *))
+ OMPThreadPrivateDecl(OMPThreadPrivate, 0, SourceLocation());
D->NumVars = N;
return D;
}
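
OMPThreadPrivateDecl keeps its expression list directly after the object, so the extra-bytes argument to the new placement form reserves that trailing storage. A hedged, generic sketch of the trailing-array idiom (no Clang types; ExprList and its members are invented):

#include <cstddef>
#include <new>

struct ExprList {
  unsigned NumVars = 0;
  // The payload lives immediately after the object, like the Expr* list
  // stored after OMPThreadPrivateDecl.
  int *vars() { return reinterpret_cast<int *>(this + 1); }

  static ExprList *Create(unsigned N) {
    void *Mem = ::operator new(sizeof(ExprList) + N * sizeof(int));
    ExprList *E = new (Mem) ExprList();
    E->NumVars = N;
    return E;
  }
};

int main() {
  ExprList *E = ExprList::Create(4);
  for (unsigned i = 0; i < E->NumVars; ++i)
    E->vars()[i] = int(i);
  int Last = E->vars()[3];
  ::operator delete(E);
  return Last - 3;                        // 0
}
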
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index 767f662..a57532c 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -114,7 +114,7 @@
else if (const ArrayType* ATy = dyn_cast<ArrayType>(BaseType))
BaseType = ATy->getElementType();
else if (const FunctionType* FTy = BaseType->getAs<FunctionType>())
- BaseType = FTy->getResultType();
+ BaseType = FTy->getReturnType();
else if (const VectorType *VTy = BaseType->getAs<VectorType>())
BaseType = VTy->getElementType();
else if (const ReferenceType *RTy = BaseType->getAs<ReferenceType>())
@@ -167,7 +167,7 @@
}
}
-void DeclContext::dumpDeclContext() const {
+LLVM_DUMP_METHOD void DeclContext::dumpDeclContext() const {
// Get the translation unit
const DeclContext *DC = this;
while (!DC->isTranslationUnit())
@@ -238,17 +238,6 @@
if (D->isImplicit())
continue;
- // FIXME: Ugly hack so we don't pretty-print the builtin declaration
- // of __builtin_va_list or __[u]int128_t. There should be some other way
- // to check that.
- if (NamedDecl *ND = dyn_cast<NamedDecl>(*D)) {
- if (IdentifierInfo *II = ND->getIdentifier()) {
- if (II->isStr("__builtin_va_list") ||
- II->isStr("__int128_t") || II->isStr("__uint128_t"))
- continue;
- }
- }
-
  // The next bits of code handle stuff like "struct {int x;} a,b"; we're
// forced to merge the declarations because there's no other way to
// refer to the struct in question. This limited merging is safe without
@@ -396,6 +385,7 @@
void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D);
+ CXXConversionDecl *ConversionDecl = dyn_cast<CXXConversionDecl>(D);
if (!Policy.SuppressSpecifiers) {
switch (D->getStorageClass()) {
case SC_None: break;
@@ -409,7 +399,9 @@
if (D->isInlineSpecified()) Out << "inline ";
if (D->isVirtualAsWritten()) Out << "virtual ";
if (D->isModulePrivate()) Out << "__module_private__ ";
- if (CDecl && CDecl->isExplicitSpecified())
+ if (D->isConstexpr() && !D->isExplicitlyDefaulted()) Out << "constexpr ";
+ if ((CDecl && CDecl->isExplicitSpecified()) ||
+ (ConversionDecl && ConversionDecl->isExplicit()))
Out << "explicit ";
}
@@ -423,8 +415,7 @@
Ty = PT->getInnerType();
}
- if (isa<FunctionType>(Ty)) {
- const FunctionType *AFT = Ty->getAs<FunctionType>();
+ if (const FunctionType *AFT = Ty->getAs<FunctionType>()) {
const FunctionProtoType *FT = 0;
if (D->hasWrittenPrototype())
FT = dyn_cast<FunctionProtoType>(AFT);
@@ -459,6 +450,17 @@
Proto += " volatile";
if (FT->isRestrict())
Proto += " restrict";
+
+ switch (FT->getRefQualifier()) {
+ case RQ_None:
+ break;
+ case RQ_LValue:
+ Proto += " &";
+ break;
+ case RQ_RValue:
+ Proto += " &&";
+ break;
+ }
}
if (FT && FT->hasDynamicExceptionSpec()) {
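
The new switch appends the function's ref-qualifier to the printed prototype. In source terms, these are the declarations it now renders faithfully (minimal example):

struct Widget {
  void run() & {}    // printed with a trailing " &"
  void run() && {}   // printed with a trailing " &&"
};

int main() {
  Widget w;
  w.run();           // lvalue overload
  Widget().run();    // rvalue overload
  return 0;
}
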
@@ -488,10 +490,7 @@
if (CDecl) {
bool HasInitializerList = false;
- for (CXXConstructorDecl::init_const_iterator B = CDecl->init_begin(),
- E = CDecl->init_end();
- B != E; ++B) {
- CXXCtorInitializer *BMInitializer = (*B);
+ for (const auto *BMInitializer : CDecl->inits()) {
if (BMInitializer->isInClassMemberInitializer())
continue;
@@ -547,16 +546,18 @@
}
}
Out << ")";
+ if (BMInitializer->isPackExpansion())
+ Out << "...";
}
- if (!Proto.empty())
- Out << Proto;
- } else {
+ } else if (!ConversionDecl && !isa<CXXDestructorDecl>(D)) {
if (FT && FT->hasTrailingReturn()) {
Out << "auto " << Proto << " -> ";
Proto.clear();
}
- AFT->getResultType().print(Out, Policy, Proto);
+ AFT->getReturnType().print(Out, Policy, Proto);
+ Proto.clear();
}
+ Out << Proto;
} else {
Ty.print(Out, Policy, Proto);
}
@@ -884,10 +885,9 @@
void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
if (PrintInstantiation) {
TemplateParameterList *Params = D->getTemplateParameters();
- for (FunctionTemplateDecl::spec_iterator I = D->spec_begin(), E = D->spec_end();
- I != E; ++I) {
- PrintTemplateParameters(Params, (*I)->getTemplateSpecializationArgs());
- Visit(*I);
+ for (auto *I : D->specializations()) {
+ PrintTemplateParameters(Params, I->getTemplateSpecializationArgs());
+ Visit(I);
}
}
@@ -897,10 +897,9 @@
void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (PrintInstantiation) {
TemplateParameterList *Params = D->getTemplateParameters();
- for (ClassTemplateDecl::spec_iterator I = D->spec_begin(), E = D->spec_end();
- I != E; ++I) {
- PrintTemplateParameters(Params, &(*I)->getTemplateArgs());
- Visit(*I);
+ for (auto *I : D->specializations()) {
+ PrintTemplateParameters(Params, &I->getTemplateArgs());
+ Visit(I);
Out << '\n';
}
}
@@ -917,19 +916,19 @@
Out << "- ";
else
Out << "+ ";
- if (!OMD->getResultType().isNull())
- Out << '(' << OMD->getASTContext().getUnqualifiedObjCPointerType(OMD->getResultType()).
- getAsString(Policy) << ")";
+ if (!OMD->getReturnType().isNull())
+ Out << '(' << OMD->getASTContext()
+ .getUnqualifiedObjCPointerType(OMD->getReturnType())
+ .getAsString(Policy) << ")";
std::string name = OMD->getSelector().getAsString();
std::string::size_type pos, lastPos = 0;
- for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
- E = OMD->param_end(); PI != E; ++PI) {
+ for (const auto *PI : OMD->params()) {
// FIXME: selector is missing here!
pos = name.find_first_of(':', lastPos);
Out << " " << name.substr(lastPos, pos - lastPos);
- Out << ":(" << (*PI)->getASTContext().getUnqualifiedObjCPointerType((*PI)->getType()).
- getAsString(Policy) << ')' << **PI;
+ Out << ":(" << PI->getASTContext().getUnqualifiedObjCPointerType(PI->getType()).
+ getAsString(Policy) << ')' << *PI;
lastPos = pos + 1;
}
@@ -960,10 +959,9 @@
if (OID->ivar_size() > 0) {
Out << "{\n";
Indentation += Policy.Indentation;
- for (ObjCImplementationDecl::ivar_iterator I = OID->ivar_begin(),
- E = OID->ivar_end(); I != E; ++I) {
+ for (const auto *I : OID->ivars()) {
Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
- getAsString(Policy) << ' ' << **I << ";\n";
+ getAsString(Policy) << ' ' << *I << ";\n";
}
Indentation -= Policy.Indentation;
Out << "}\n";
@@ -999,10 +997,10 @@
Out << "{\n";
eolnOut = true;
Indentation += Policy.Indentation;
- for (ObjCInterfaceDecl::ivar_iterator I = OID->ivar_begin(),
- E = OID->ivar_end(); I != E; ++I) {
- Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
- getAsString(Policy) << ' ' << **I << ";\n";
+ for (const auto *I : OID->ivars()) {
+ Indent() << I->getASTContext()
+ .getUnqualifiedObjCPointerType(I->getType())
+ .getAsString(Policy) << ' ' << *I << ";\n";
}
Indentation -= Policy.Indentation;
Out << "}\n";
@@ -1051,11 +1049,9 @@
if (PID->ivar_size() > 0) {
Out << "{\n";
Indentation += Policy.Indentation;
- for (ObjCCategoryDecl::ivar_iterator I = PID->ivar_begin(),
- E = PID->ivar_end(); I != E; ++I) {
+ for (const auto *I : PID->ivars())
Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
- getAsString(Policy) << ' ' << **I << ";\n";
- }
+ getAsString(Policy) << ' ' << *I << ";\n";
Indentation -= Policy.Indentation;
Out << "}\n";
}
@@ -1090,13 +1086,13 @@
}
if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
- Out << (first ? ' ' : ',') << "getter = "
- << PDecl->getGetterName().getAsString();
+ Out << (first ? ' ' : ',') << "getter = ";
+ PDecl->getGetterName().print(Out);
first = false;
}
if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
- Out << (first ? ' ' : ',') << "setter = "
- << PDecl->getSetterName().getAsString();
+ Out << (first ? ' ' : ',') << "setter = ";
+ PDecl->getSetterName().print(Out);
first = false;
}
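
A recurring mechanical change in the DeclPrinter hunks above (and throughout the rest of this patch) is replacing hand-written iterator loops with C++11 range-based for loops over the new range accessors (specializations(), params(), ivars(), fields(), ...). A minimal before/after sketch, with use() standing in for the loop body:

    // Old style: explicit iterator pair.
    for (FunctionTemplateDecl::spec_iterator I = D->spec_begin(),
                                             E = D->spec_end(); I != E; ++I)
      use(*I);

    // New style: range accessor plus range-based for; use() is a placeholder.
    for (auto *Spec : D->specializations())
      use(Spec);
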
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index 7172fb7..fc73e6f 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -229,14 +229,13 @@
TemplateParameterList *Params,
NamedDecl *Decl) {
AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
- return new (C) FunctionTemplateDecl(DC, L, Name, Params, Decl);
+ return new (C, DC) FunctionTemplateDecl(DC, L, Name, Params, Decl);
}
FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionTemplateDecl));
- return new (Mem) FunctionTemplateDecl(0, SourceLocation(), DeclarationName(),
- 0, 0);
+ return new (C, ID) FunctionTemplateDecl(0, SourceLocation(), DeclarationName(),
+ 0, 0);
}
RedeclarableTemplateDecl::CommonBase *
@@ -308,15 +307,15 @@
NamedDecl *Decl,
ClassTemplateDecl *PrevDecl) {
AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
- ClassTemplateDecl *New = new (C) ClassTemplateDecl(DC, L, Name, Params, Decl);
+ ClassTemplateDecl *New =
+ new (C, DC) ClassTemplateDecl(DC, L, Name, Params, Decl);
New->setPreviousDecl(PrevDecl);
return New;
}
ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ClassTemplateDecl));
- return new (Mem) ClassTemplateDecl(EmptyShell());
+ return new (C, ID) ClassTemplateDecl(EmptyShell());
}
void ClassTemplateDecl::LoadLazySpecializations() const {
@@ -471,7 +470,7 @@
unsigned D, unsigned P, IdentifierInfo *Id,
bool Typename, bool ParameterPack) {
TemplateTypeParmDecl *TTPDecl =
- new (C) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename);
+ new (C, DC) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename);
QualType TTPType = C.getTemplateTypeParmType(D, P, ParameterPack, TTPDecl);
TTPDecl->TypeForDecl = TTPType.getTypePtr();
return TTPDecl;
@@ -479,9 +478,8 @@
TemplateTypeParmDecl *
TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTypeParmDecl));
- return new (Mem) TemplateTypeParmDecl(0, SourceLocation(), SourceLocation(),
- 0, false);
+ return new (C, ID) TemplateTypeParmDecl(0, SourceLocation(), SourceLocation(),
+ 0, false);
}
SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
@@ -544,8 +542,8 @@
unsigned D, unsigned P, IdentifierInfo *Id,
QualType T, bool ParameterPack,
TypeSourceInfo *TInfo) {
- return new (C) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id,
- T, ParameterPack, TInfo);
+ return new (C, DC) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id,
+ T, ParameterPack, TInfo);
}
NonTypeTemplateParmDecl *
@@ -557,34 +555,26 @@
const QualType *ExpandedTypes,
unsigned NumExpandedTypes,
TypeSourceInfo **ExpandedTInfos) {
- unsigned Size = sizeof(NonTypeTemplateParmDecl)
- + NumExpandedTypes * 2 * sizeof(void*);
- void *Mem = C.Allocate(Size);
- return new (Mem) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc,
- D, P, Id, T, TInfo,
- ExpandedTypes, NumExpandedTypes,
- ExpandedTInfos);
+ unsigned Extra = NumExpandedTypes * 2 * sizeof(void*);
+ return new (C, DC, Extra) NonTypeTemplateParmDecl(
+ DC, StartLoc, IdLoc, D, P, Id, T, TInfo,
+ ExpandedTypes, NumExpandedTypes, ExpandedTInfos);
}
NonTypeTemplateParmDecl *
NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NonTypeTemplateParmDecl));
- return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(),
- SourceLocation(), 0, 0, 0,
- QualType(), false, 0);
+ return new (C, ID) NonTypeTemplateParmDecl(0, SourceLocation(),
+ SourceLocation(), 0, 0, 0,
+ QualType(), false, 0);
}
NonTypeTemplateParmDecl *
NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumExpandedTypes) {
- unsigned Size = sizeof(NonTypeTemplateParmDecl)
- + NumExpandedTypes * 2 * sizeof(void*);
-
- void *Mem = AllocateDeserializedDecl(C, ID, Size);
- return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(),
- SourceLocation(), 0, 0, 0,
- QualType(), 0, 0, NumExpandedTypes,
- 0);
+ unsigned Extra = NumExpandedTypes * 2 * sizeof(void*);
+ return new (C, ID, Extra) NonTypeTemplateParmDecl(
+ 0, SourceLocation(), SourceLocation(), 0, 0, 0, QualType(), 0,
+ 0, NumExpandedTypes, 0);
}
SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
@@ -624,8 +614,8 @@
SourceLocation L, unsigned D, unsigned P,
bool ParameterPack, IdentifierInfo *Id,
TemplateParameterList *Params) {
- return new (C) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id,
- Params);
+ return new (C, DC) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id,
+ Params);
}
TemplateTemplateParmDecl *
@@ -634,28 +624,23 @@
IdentifierInfo *Id,
TemplateParameterList *Params,
ArrayRef<TemplateParameterList *> Expansions) {
- void *Mem = C.Allocate(sizeof(TemplateTemplateParmDecl) +
- sizeof(TemplateParameterList*) * Expansions.size());
- return new (Mem) TemplateTemplateParmDecl(DC, L, D, P, Id, Params,
- Expansions.size(),
- Expansions.data());
+ return new (C, DC, sizeof(TemplateParameterList*) * Expansions.size())
+ TemplateTemplateParmDecl(DC, L, D, P, Id, Params,
+ Expansions.size(), Expansions.data());
}
TemplateTemplateParmDecl *
TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTemplateParmDecl));
- return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, false,
- 0, 0);
+ return new (C, ID) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, false,
+ 0, 0);
}
TemplateTemplateParmDecl *
TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumExpansions) {
- unsigned Size = sizeof(TemplateTemplateParmDecl) +
- sizeof(TemplateParameterList*) * NumExpansions;
- void *Mem = AllocateDeserializedDecl(C, ID, Size);
- return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, 0, 0,
- NumExpansions, 0);
+ return new (C, ID, sizeof(TemplateParameterList*) * NumExpansions)
+ TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, 0, 0,
+ NumExpansions, 0);
}
//===----------------------------------------------------------------------===//
@@ -734,13 +719,10 @@
const TemplateArgument *Args,
unsigned NumArgs,
ClassTemplateSpecializationDecl *PrevDecl) {
- ClassTemplateSpecializationDecl *Result
- = new (Context)ClassTemplateSpecializationDecl(Context,
- ClassTemplateSpecialization,
- TK, DC, StartLoc, IdLoc,
- SpecializedTemplate,
- Args, NumArgs,
- PrevDecl);
+ ClassTemplateSpecializationDecl *Result =
+ new (Context, DC) ClassTemplateSpecializationDecl(
+ Context, ClassTemplateSpecialization, TK, DC, StartLoc, IdLoc,
+ SpecializedTemplate, Args, NumArgs, PrevDecl);
Result->MayHaveOutOfDateDef = false;
Context.getTypeDeclType(Result, PrevDecl);
@@ -748,12 +730,10 @@
}
ClassTemplateSpecializationDecl *
-ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
+ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID,
- sizeof(ClassTemplateSpecializationDecl));
ClassTemplateSpecializationDecl *Result =
- new (Mem) ClassTemplateSpecializationDecl(ClassTemplateSpecialization);
+ new (C, ID) ClassTemplateSpecializationDecl(ClassTemplateSpecialization);
Result->MayHaveOutOfDateDef = false;
return Result;
}
@@ -855,14 +835,10 @@
const ASTTemplateArgumentListInfo *ASTArgInfos =
ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
- ClassTemplatePartialSpecializationDecl *Result
- = new (Context)ClassTemplatePartialSpecializationDecl(Context, TK, DC,
- StartLoc, IdLoc,
- Params,
- SpecializedTemplate,
- Args, NumArgs,
- ASTArgInfos,
- PrevDecl);
+ ClassTemplatePartialSpecializationDecl *Result = new (Context, DC)
+ ClassTemplatePartialSpecializationDecl(Context, TK, DC, StartLoc, IdLoc,
+ Params, SpecializedTemplate, Args,
+ NumArgs, ASTArgInfos, PrevDecl);
Result->setSpecializationKind(TSK_ExplicitSpecialization);
Result->MayHaveOutOfDateDef = false;
@@ -873,10 +849,8 @@
ClassTemplatePartialSpecializationDecl *
ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID,
- sizeof(ClassTemplatePartialSpecializationDecl));
- ClassTemplatePartialSpecializationDecl *Result
- = new (Mem) ClassTemplatePartialSpecializationDecl();
+ ClassTemplatePartialSpecializationDecl *Result =
+ new (C, ID) ClassTemplatePartialSpecializationDecl();
Result->MayHaveOutOfDateDef = false;
return Result;
}
@@ -894,15 +868,13 @@
TemplateParameterList **Params,
FriendUnion Friend,
SourceLocation FLoc) {
- FriendTemplateDecl *Result
- = new (Context) FriendTemplateDecl(DC, L, NParams, Params, Friend, FLoc);
- return Result;
+ return new (Context, DC) FriendTemplateDecl(DC, L, NParams, Params,
+ Friend, FLoc);
}
FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FriendTemplateDecl));
- return new (Mem) FriendTemplateDecl(EmptyShell());
+ return new (C, ID) FriendTemplateDecl(EmptyShell());
}
//===----------------------------------------------------------------------===//
@@ -916,14 +888,13 @@
TemplateParameterList *Params,
NamedDecl *Decl) {
AdoptTemplateParameterList(Params, DC);
- return new (C) TypeAliasTemplateDecl(DC, L, Name, Params, Decl);
+ return new (C, DC) TypeAliasTemplateDecl(DC, L, Name, Params, Decl);
}
TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasTemplateDecl));
- return new (Mem) TypeAliasTemplateDecl(0, SourceLocation(), DeclarationName(),
- 0, 0);
+ return new (C, ID) TypeAliasTemplateDecl(0, SourceLocation(), DeclarationName(),
+ 0, 0);
}
void TypeAliasTemplateDecl::DeallocateCommon(void *Ptr) {
@@ -945,10 +916,8 @@
ClassScopeFunctionSpecializationDecl *
ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID,
- sizeof(ClassScopeFunctionSpecializationDecl));
- return new (Mem) ClassScopeFunctionSpecializationDecl(0, SourceLocation(), 0,
- false, TemplateArgumentListInfo());
+ return new (C, ID) ClassScopeFunctionSpecializationDecl(
+ 0, SourceLocation(), 0, false, TemplateArgumentListInfo());
}
//===----------------------------------------------------------------------===//
@@ -972,20 +941,16 @@
VarTemplateDecl *VarTemplateDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName Name,
TemplateParameterList *Params,
- NamedDecl *Decl,
- VarTemplateDecl *PrevDecl) {
- VarTemplateDecl *New = new (C) VarTemplateDecl(DC, L, Name, Params, Decl);
- New->setPreviousDecl(PrevDecl);
- return New;
+ VarDecl *Decl) {
+ return new (C, DC) VarTemplateDecl(DC, L, Name, Params, Decl);
}
VarTemplateDecl *VarTemplateDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(C, ID, sizeof(VarTemplateDecl));
- return new (Mem) VarTemplateDecl(EmptyShell());
+ return new (C, ID) VarTemplateDecl(EmptyShell());
}
-// TODO: Unify accross class, function and variable templates?
+// TODO: Unify across class, function and variable templates?
// May require moving this and Common to RedeclarableTemplateDecl.
void VarTemplateDecl::LoadLazySpecializations() const {
Common *CommonPtr = getCommonPtr();
@@ -1111,20 +1076,14 @@
SourceLocation IdLoc, VarTemplateDecl *SpecializedTemplate, QualType T,
TypeSourceInfo *TInfo, StorageClass S, const TemplateArgument *Args,
unsigned NumArgs) {
- VarTemplateSpecializationDecl *Result = new (Context)
- VarTemplateSpecializationDecl(Context, VarTemplateSpecialization, DC,
- StartLoc, IdLoc, SpecializedTemplate, T,
- TInfo, S, Args, NumArgs);
- return Result;
+ return new (Context, DC) VarTemplateSpecializationDecl(
+ Context, VarTemplateSpecialization, DC, StartLoc, IdLoc,
+ SpecializedTemplate, T, TInfo, S, Args, NumArgs);
}
VarTemplateSpecializationDecl *
VarTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- void *Mem =
- AllocateDeserializedDecl(C, ID, sizeof(VarTemplateSpecializationDecl));
- VarTemplateSpecializationDecl *Result =
- new (Mem) VarTemplateSpecializationDecl(VarTemplateSpecialization);
- return Result;
+ return new (C, ID) VarTemplateSpecializationDecl(VarTemplateSpecialization);
}
void VarTemplateSpecializationDecl::getNameForDiagnostic(
@@ -1183,7 +1142,7 @@
= ASTTemplateArgumentListInfo::Create(Context, ArgInfos);
VarTemplatePartialSpecializationDecl *Result =
- new (Context) VarTemplatePartialSpecializationDecl(
+ new (Context, DC) VarTemplatePartialSpecializationDecl(
Context, DC, StartLoc, IdLoc, Params, SpecializedTemplate, T, TInfo,
S, Args, NumArgs, ASTArgInfos);
Result->setSpecializationKind(TSK_ExplicitSpecialization);
@@ -1193,9 +1152,5 @@
VarTemplatePartialSpecializationDecl *
VarTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- void *Mem = AllocateDeserializedDecl(
- C, ID, sizeof(VarTemplatePartialSpecializationDecl));
- VarTemplatePartialSpecializationDecl *Result =
- new (Mem) VarTemplatePartialSpecializationDecl();
- return Result;
+ return new (C, ID) VarTemplatePartialSpecializationDecl();
}
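
The other recurring change in DeclTemplate.cpp is the allocation idiom: instead of calling C.Allocate()/AllocateDeserializedDecl() and placement-constructing into raw memory, the Create/CreateDeserialized functions go through placement operator new overloads keyed on a DeclContext (normal decls) or a deserialization ID, with an optional trailing-byte count. A rough sketch of the shape such overloads presumably take (the exact declarations live elsewhere in the tree and are an assumption here):

    // Hypothetical declarations, for illustration only:
    void *operator new(std::size_t Size, const ASTContext &Ctx,
                       DeclContext *Parent, std::size_t Extra = 0); // normal decls
    void *operator new(std::size_t Size, const ASTContext &Ctx,
                       unsigned ID, std::size_t Extra = 0);         // deserialized decls

    // Which lets call sites collapse to:
    //   return new (C, DC) FooDecl(...);          // was: new (C.Allocate(...)) FooDecl(...)
    //   return new (C, ID) FooDecl(EmptyShell()); // was: AllocateDeserializedDecl + placement new
    //   new (C, DC, Extra) FooDecl(...);          // trailing storage for expansions, etc.
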
diff --git a/lib/AST/DeclarationName.cpp b/lib/AST/DeclarationName.cpp
index e064e23..f9041c0 100644
--- a/lib/AST/DeclarationName.cpp
+++ b/lib/AST/DeclarationName.cpp
@@ -143,13 +143,16 @@
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
- return OS << N.getObjCSelector().getAsString();
+ N.getObjCSelector().print(OS);
+ return OS;
case DeclarationName::CXXConstructorName: {
QualType ClassType = N.getCXXNameType();
if (const RecordType *ClassRec = ClassType->getAs<RecordType>())
return OS << *ClassRec->getDecl();
- return OS << ClassType.getAsString();
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ return OS << ClassType.getAsString(PrintingPolicy(LO));
}
case DeclarationName::CXXDestructorName: {
@@ -157,7 +160,9 @@
QualType Type = N.getCXXNameType();
if (const RecordType *Rec = Type->getAs<RecordType>())
return OS << *Rec->getDecl();
- return OS << Type.getAsString();
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ return OS << Type.getAsString(PrintingPolicy(LO));
}
case DeclarationName::CXXOperatorName: {
@@ -184,7 +189,10 @@
QualType Type = N.getCXXNameType();
if (const RecordType *Rec = Type->getAs<RecordType>())
return OS << *Rec->getDecl();
- return OS << Type.getAsString();
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ return OS << Type.getAsString(PrintingPolicy(LO));
}
case DeclarationName::CXXUsingDirective:
return OS << "<using-directive>";
@@ -537,7 +545,10 @@
OS << '~';
else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName)
OS << "operator ";
- OS << TInfo->getType().getAsString();
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ OS << TInfo->getType().getAsString(PrintingPolicy(LO));
} else
OS << Name;
return;
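
The DeclarationName.cpp changes replace getAsString() round-trips with direct streaming (Selector::print(OS)) and, for the C++ name kinds, build an explicit PrintingPolicy so types spell themselves the C++ way (e.g. bool rather than _Bool). The same pattern, shown standalone:

    // Print a QualType with C++ spellings; mirrors the policy construction above.
    LangOptions LO;
    LO.CPlusPlus = true;   // use C++ type names
    LO.Bool = true;        // print 'bool' instead of '_Bool'
    PrintingPolicy Policy(LO);
    OS << T.getAsString(Policy);   // T is some QualType, OS an llvm::raw_ostream
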
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 9055dda..2f5f14f 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -105,37 +105,6 @@
return E;
}
-const Expr *
-Expr::findMaterializedTemporary(const MaterializeTemporaryExpr *&MTE) const {
- const Expr *E = this;
-
- // This might be a default initializer for a reference member. Walk over the
- // wrapper node for that.
- if (const CXXDefaultInitExpr *DAE = dyn_cast<CXXDefaultInitExpr>(E))
- E = DAE->getExpr();
-
- // Look through single-element init lists that claim to be lvalues. They're
- // just syntactic wrappers in this case.
- if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
- if (ILE->getNumInits() == 1 && ILE->isGLValue()) {
- E = ILE->getInit(0);
- if (const CXXDefaultInitExpr *DAE = dyn_cast<CXXDefaultInitExpr>(E))
- E = DAE->getExpr();
- }
- }
-
- // Look through expressions for materialized temporaries (for now).
- if (const MaterializeTemporaryExpr *M
- = dyn_cast<MaterializeTemporaryExpr>(E)) {
- MTE = M;
- E = M->GetTemporaryExpr();
- }
-
- if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
- E = DAE->getExpr();
- return E;
-}
-
/// isKnownToHaveBooleanValue - Return true if this is an integer expression
/// that is known to return 0 or 1. This happens for _Bool/bool expressions
/// but also int expressions which are produced by things like comparisons in
@@ -484,7 +453,7 @@
if (IT == PredefinedExpr::FuncDName) {
if (const NamedDecl *ND = dyn_cast<NamedDecl>(CurrentDecl)) {
- OwningPtr<MangleContext> MC;
+ std::unique_ptr<MangleContext> MC;
MC.reset(Context.createMangleContext());
if (MC->shouldMangleDeclName(ND)) {
@@ -619,13 +588,15 @@
// not a constructor or destructor.
if ((isa<CXXMethodDecl>(FD) &&
cast<CXXMethodDecl>(FD)->getParent()->isLambda()) ||
- (FT && FT->getResultType()->getAs<AutoType>()))
+ (FT && FT->getReturnType()->getAs<AutoType>()))
Proto = "auto " + Proto;
- else if (FT && FT->getResultType()->getAs<DecltypeType>())
- FT->getResultType()->getAs<DecltypeType>()->getUnderlyingType()
+ else if (FT && FT->getReturnType()->getAs<DecltypeType>())
+ FT->getReturnType()
+ ->getAs<DecltypeType>()
+ ->getUnderlyingType()
.getAsStringInternal(Proto, Policy);
else if (!isa<CXXConstructorDecl>(FD) && !isa<CXXDestructorDecl>(FD))
- AFT->getResultType().getAsStringInternal(Proto, Policy);
+ AFT->getReturnType().getAsStringInternal(Proto, Policy);
Out << Proto;
@@ -658,7 +629,7 @@
Out << '(' << *CID << ')';
Out << ' ';
- Out << MD->getSelector().getAsString();
+ MD->getSelector().print(Out);
Out << ']';
Out.flush();
@@ -810,6 +781,9 @@
StringKind Kind, bool Pascal, QualType Ty,
const SourceLocation *Loc,
unsigned NumStrs) {
+ assert(C.getAsConstantArrayType(Ty) &&
+ "StringLiteral must be of constant array type!");
+
// Allocate enough space for the StringLiteral plus an array of locations for
// any concatenated string tokens.
void *Mem = C.Allocate(sizeof(StringLiteral)+
@@ -1215,9 +1189,9 @@
this->NumArgs = NumArgs;
}
-/// isBuiltinCall - If this is a call to a builtin, return the builtin ID. If
+/// getBuiltinCallee - If this is a call to a builtin, return the builtin ID. If
/// not, return 0.
-unsigned CallExpr::isBuiltinCall() const {
+unsigned CallExpr::getBuiltinCallee() const {
// All simple function calls (e.g. func()) are implicitly cast to pointer to
// function. As a result, we try and obtain the DeclRefExpr from the
// ImplicitCastExpr.
@@ -1240,7 +1214,7 @@
}
bool CallExpr::isUnevaluatedBuiltinCall(ASTContext &Ctx) const {
- if (unsigned BI = isBuiltinCall())
+ if (unsigned BI = getBuiltinCallee())
return Ctx.BuiltinInfo.isUnevaluated(BI);
return false;
}
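
isBuiltinCall() is renamed to getBuiltinCallee() to make clear that it returns the builtin ID (0 when the callee is not a builtin) rather than a boolean. Typical call-site shape, matching the uses later in this patch:

    // Detect a specific builtin; a return of 0 means "not a builtin call".
    if (const CallExpr *CE = dyn_cast<CallExpr>(E->IgnoreParenCasts()))
      if (CE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
        handleConstantP(CE);   // hypothetical helper
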
@@ -1256,7 +1230,7 @@
CalleeType = Expr::findBoundMemberType(getCallee());
const FunctionType *FnType = CalleeType->castAs<FunctionType>();
- return FnType->getResultType();
+ return FnType->getReturnType();
}
SourceLocation CallExpr::getLocStart() const {
@@ -1421,7 +1395,7 @@
return EndLoc;
}
-void CastExpr::CheckCastConsistency() const {
+bool CastExpr::CastConsistency() const {
switch (getCastKind()) {
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase:
@@ -1474,6 +1448,11 @@
assert(getSubExpr()->getType()->isFunctionType());
goto CheckNoBasePath;
+ case CK_AddressSpaceConversion:
+ assert(getType()->isPointerType());
+ assert(getSubExpr()->getType()->isPointerType());
+ assert(getType()->getPointeeType().getAddressSpace() !=
+ getSubExpr()->getType()->getPointeeType().getAddressSpace());
// These should not have an inheritance path.
case CK_Dynamic:
case CK_ToUnion:
@@ -1524,6 +1503,7 @@
assert(path_empty() && "Cast kind should not have a base path!");
break;
}
+ return true;
}
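
CheckCastConsistency() becomes CastConsistency() returning bool, presumably so the debug-only check can sit inside an assertion at the call site and compile away in NDEBUG builds:

    // Hypothetical call site inside a CastExpr constructor:
    assert(CastConsistency() && "cast kind inconsistent with operand/result types");
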
const char *CastExpr::getCastKindName() const {
@@ -1625,7 +1605,7 @@
case CK_ARCReclaimReturnedObject:
return "ARCReclaimReturnedObject";
case CK_ARCExtendBlockObject:
- return "ARCCExtendBlockObject";
+ return "ARCExtendBlockObject";
case CK_AtomicToNonAtomic:
return "AtomicToNonAtomic";
case CK_NonAtomicToAtomic:
@@ -1636,6 +1616,8 @@
return "BuiltinFnToFnPtr";
case CK_ZeroToOCLEvent:
return "ZeroToOCLEvent";
+ case CK_AddressSpaceConversion:
+ return "AddressSpaceConversion";
}
llvm_unreachable("Unhandled cast kind!");
@@ -1867,12 +1849,12 @@
Expr *InitListExpr::updateInit(const ASTContext &C, unsigned Init, Expr *expr) {
if (Init >= InitExprs.size()) {
InitExprs.insert(C, InitExprs.end(), Init - InitExprs.size() + 1, 0);
- InitExprs.back() = expr;
+ setInit(Init, expr);
return 0;
}
Expr *Result = cast_or_null<Expr>(InitExprs[Init]);
- InitExprs[Init] = expr;
+ setInit(Init, expr);
return Result;
}
@@ -1892,7 +1874,11 @@
const ArrayType *AT = getType()->getAsArrayTypeUnsafe();
if (!AT || !AT->getElementType()->isIntegerType())
return false;
- const Expr *Init = getInit(0)->IgnoreParens();
+ // It is possible for getInit() to return null.
+ const Expr *Init = getInit(0);
+ if (!Init)
+ return false;
+ Init = Init->IgnoreParens();
return isa<StringLiteral>(Init) || isa<ObjCEncodeExpr>(Init);
}
@@ -2078,15 +2064,22 @@
return true;
case CXXOperatorCallExprClass: {
- // We warn about operator== and operator!= even when user-defined operator
+ // Warn about operator ==,!=,<,>,<=, and >= even when user-defined operator
// overloads as there is no reasonable way to define these such that they
// have non-trivial, desirable side-effects. See the -Wunused-comparison
- // warning: these operators are commonly typo'ed, and so warning on them
+ // warning: operators == and != are commonly typo'ed, and so warning on them
// provides additional value as well. If this list is updated,
// DiagnoseUnusedComparison should be as well.
const CXXOperatorCallExpr *Op = cast<CXXOperatorCallExpr>(this);
- if (Op->getOperator() == OO_EqualEqual ||
- Op->getOperator() == OO_ExclaimEqual) {
+ switch (Op->getOperator()) {
+ default:
+ break;
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ case OO_Less:
+ case OO_Greater:
+ case OO_GreaterEqual:
+ case OO_LessEqual:
WarnE = this;
Loc = Op->getOperatorLoc();
R1 = Op->getSourceRange();
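
With this hunk, isUnusedResultAWarning() flags all six overloaded relational/equality operators, not just == and !=, so -Wunused-comparison can fire on discarded relational results too. An input that would now qualify, as a sketch:

    struct Version { bool operator<(const Version &RHS) const; };
    void check(Version A, Version B) {
      A < B;   // overloaded operator< with the result discarded: now reported,
    }          // just like the existing `A == B;` case
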
@@ -2106,8 +2099,8 @@
//
// Note: If new cases are added here, DiagnoseUnusedExprResult should be
// updated to match for QoI.
- if (FD->getAttr<WarnUnusedResultAttr>() ||
- FD->getAttr<PureAttr>() || FD->getAttr<ConstAttr>()) {
+ if (FD->hasAttr<WarnUnusedResultAttr>() ||
+ FD->hasAttr<PureAttr>() || FD->hasAttr<ConstAttr>()) {
WarnE = this;
Loc = CE->getCallee()->getLocStart();
R1 = CE->getCallee()->getSourceRange();
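
getAttr<T>() followed by a pointer test is replaced by hasAttr<T>() wherever only presence matters; getAttr<T>() remains the right call when the attribute object itself is needed:

    if (FD->hasAttr<ConstAttr>())                          // presence check only
      markAsPure(FD);                                      // hypothetical helper
    if (const auto *A = FD->getAttr<WarnUnusedResultAttr>())
      noteAttrLocation(A->getLocation());                  // attribute data actually used
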
@@ -2152,7 +2145,7 @@
}
const ObjCMethodDecl *MD = ME->getMethodDecl();
- if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
+ if (MD && MD->hasAttr<WarnUnusedResultAttr>()) {
WarnE = this;
Loc = getExprLoc();
return true;
@@ -2821,8 +2814,6 @@
case CXXThisExprClass:
case CXXScalarValueInitExprClass:
case TypeTraitExprClass:
- case UnaryTypeTraitExprClass:
- case BinaryTypeTraitExprClass:
case ArrayTypeTraitExprClass:
case ExpressionTraitExprClass:
case CXXNoexceptExprClass:
@@ -3055,7 +3046,7 @@
Expr::isNullPointerConstant(ASTContext &Ctx,
NullPointerConstantValueDependence NPC) const {
if (isValueDependent() &&
- (!Ctx.getLangOpts().CPlusPlus11 || Ctx.getLangOpts().MicrosoftMode)) {
+ (!Ctx.getLangOpts().CPlusPlus11 || Ctx.getLangOpts().MSVCCompat)) {
switch (NPC) {
case NPC_NeverValueDependent:
llvm_unreachable("Unexpected value dependent expression!");
@@ -3141,8 +3132,7 @@
const IntegerLiteral *Lit = dyn_cast<IntegerLiteral>(this);
if (Lit && !Lit->getValue())
return NPCK_ZeroLiteral;
- else if (!Ctx.getLangOpts().MicrosoftMode ||
- !isCXX98IntegralConstantExpr(Ctx))
+ else if (!Ctx.getLangOpts().MSVCCompat || !isCXX98IntegralConstantExpr(Ctx))
return NPCK_NotNull;
} else {
// If we have an integer constant expression, we need to *evaluate* it and
@@ -3806,30 +3796,21 @@
Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const {
assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
- char *Ptr = static_cast<char *>(
- const_cast<void *>(static_cast<const void *>(this)));
- Ptr += sizeof(DesignatedInitExpr);
- Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
}
Expr *DesignatedInitExpr::getArrayRangeStart(const Designator &D) const {
assert(D.Kind == Designator::ArrayRangeDesignator &&
"Requires array range designator");
- char *Ptr = static_cast<char *>(
- const_cast<void *>(static_cast<const void *>(this)));
- Ptr += sizeof(DesignatedInitExpr);
- Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
}
Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const {
assert(D.Kind == Designator::ArrayRangeDesignator &&
"Requires array range designator");
- char *Ptr = static_cast<char *>(
- const_cast<void *>(static_cast<const void *>(this)));
- Ptr += sizeof(DesignatedInitExpr);
- Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 2));
}
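
The three DesignatedInitExpr accessors now reach their trailing sub-expression array with pointer arithmetic on `this` instead of the char*/void* dance. A generic sketch of the trailing-storage idiom being relied on:

    // The sub-expression pointers are allocated immediately after the node itself,
    // so the array starts at "one past this object".
    struct NodeWithTrailingSubExprs {
      unsigned NumSubExprs;
      Stmt *const *getSubExprs() const {
        return reinterpret_cast<Stmt *const *>(this + 1);
      }
    };
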
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index 3738c0e..ee49925 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -108,10 +108,8 @@
return UuidForRD;
}
- for (CXXRecordDecl::redecl_iterator I = RD->redecls_begin(),
- E = RD->redecls_end();
- I != E; ++I)
- if (UuidAttr *Uuid = I->getAttr<UuidAttr>())
+ for (auto I : RD->redecls())
+ if (auto Uuid = I->getAttr<UuidAttr>())
return Uuid;
return 0;
@@ -1291,16 +1289,11 @@
NamedDecl *decl = *begin;
if (isa<UnresolvedUsingValueDecl>(decl))
return false;
- if (isa<UsingShadowDecl>(decl))
- decl = cast<UsingShadowDecl>(decl)->getUnderlyingDecl();
// Unresolved member expressions should only contain methods and
// method templates.
- assert(isa<CXXMethodDecl>(decl) || isa<FunctionTemplateDecl>(decl));
-
- if (isa<FunctionTemplateDecl>(decl))
- decl = cast<FunctionTemplateDecl>(decl)->getTemplatedDecl();
- if (cast<CXXMethodDecl>(decl)->isStatic())
+ if (cast<CXXMethodDecl>(decl->getUnderlyingDecl()->getAsFunction())
+ ->isStatic())
return false;
} while (++begin != end);
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index 54f77ef..55b9f13 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -165,8 +165,6 @@
case Expr::FloatingLiteralClass:
case Expr::CXXNoexceptExprClass:
case Expr::CXXScalarValueInitExprClass:
- case Expr::UnaryTypeTraitExprClass:
- case Expr::BinaryTypeTraitExprClass:
case Expr::TypeTraitExprClass:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
@@ -348,7 +346,7 @@
case Expr::ObjCMessageExprClass:
if (const ObjCMethodDecl *Method =
cast<ObjCMessageExpr>(E)->getMethodDecl()) {
- Cl::Kinds kind = ClassifyUnnamed(Ctx, Method->getResultType());
+ Cl::Kinds kind = ClassifyUnnamed(Ctx, Method->getReturnType());
return (kind == Cl::CL_PRValue) ? Cl::CL_ObjCMessageRValue : kind;
}
return Cl::CL_PRValue;
@@ -543,10 +541,21 @@
"This is only relevant for C++.");
// C++ [expr.cond]p2
- // If either the second or the third operand has type (cv) void, [...]
- // the result [...] is a prvalue.
- if (True->getType()->isVoidType() || False->getType()->isVoidType())
+ // If either the second or the third operand has type (cv) void,
+ // one of the following shall hold:
+ if (True->getType()->isVoidType() || False->getType()->isVoidType()) {
+ // The second or the third operand (but not both) is a (possibly
+ // parenthesized) throw-expression; the result is of the [...] value
+ // category of the other.
+ bool TrueIsThrow = isa<CXXThrowExpr>(True->IgnoreParenImpCasts());
+ bool FalseIsThrow = isa<CXXThrowExpr>(False->IgnoreParenImpCasts());
+ if (const Expr *NonThrow = TrueIsThrow ? (FalseIsThrow ? 0 : False)
+ : (FalseIsThrow ? True : 0))
+ return ClassifyInternal(Ctx, NonThrow);
+
+ // [Otherwise] the result [...] is a prvalue.
return Cl::CL_PRValue;
+ }
// Note that at this point, we have already performed all conversions
// according to [expr.cond]p3.
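
The classification change implements the quoted wording of C++ [expr.cond]p2: when exactly one arm is a throw-expression, the conditional takes the value category of the other arm instead of decaying to a prvalue. For example, this now classifies as an lvalue and the assignment is accepted:

    void setOrThrow(bool ok, int &x) {
      (ok ? x : throw 0) = 42;   // one arm is a throw; result has x's value category
    }
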
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 390cfe9..9c69080 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -474,13 +474,30 @@
/// Evaluate in any way we know how. Don't worry about side-effects that
/// can't be modeled.
- EM_IgnoreSideEffects
+ EM_IgnoreSideEffects,
+
+ /// Evaluate as a constant expression. Stop if we find that the expression
+ /// is not a constant expression. Some expressions can be retried in the
+ /// optimizer if we don't constant fold them here, but in an unevaluated
+ /// context we try to fold them immediately since the optimizer never
+ /// gets a chance to look at it.
+ EM_ConstantExpressionUnevaluated,
+
+ /// Evaluate as a potential constant expression. Keep going if we hit a
+ /// construct that we can't evaluate yet (because we don't yet know the
+ /// value of something) but stop if we hit something that could never be
+ /// a constant expression. Some expressions can be retried in the
+ /// optimizer if we don't constant fold them here, but in an unevaluated
+ /// context we try to fold them immediately since the optimizer never
+ /// gets a chance to look at it.
+ EM_PotentialConstantExpressionUnevaluated
} EvalMode;
/// Are we checking whether the expression is a potential constant
/// expression?
bool checkingPotentialConstantExpression() const {
- return EvalMode == EM_PotentialConstantExpression;
+ return EvalMode == EM_PotentialConstantExpression ||
+ EvalMode == EM_PotentialConstantExpressionUnevaluated;
}
/// Are we checking an expression for overflow?
@@ -573,6 +590,8 @@
// some later problem.
case EM_ConstantExpression:
case EM_PotentialConstantExpression:
+ case EM_ConstantExpressionUnevaluated:
+ case EM_PotentialConstantExpressionUnevaluated:
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
@@ -644,11 +663,13 @@
bool keepEvaluatingAfterSideEffect() {
switch (EvalMode) {
case EM_PotentialConstantExpression:
+ case EM_PotentialConstantExpressionUnevaluated:
case EM_EvaluateForOverflow:
case EM_IgnoreSideEffects:
return true;
case EM_ConstantExpression:
+ case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
return false;
}
@@ -670,10 +691,12 @@
switch (EvalMode) {
case EM_PotentialConstantExpression:
+ case EM_PotentialConstantExpressionUnevaluated:
case EM_EvaluateForOverflow:
return true;
case EM_ConstantExpression:
+ case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
case EM_IgnoreSideEffects:
return false;
@@ -696,7 +719,9 @@
Info.EvalStatus.Diag->empty() &&
!Info.EvalStatus.HasSideEffects),
OldMode(Info.EvalMode) {
- if (Enabled && Info.EvalMode == EvalInfo::EM_ConstantExpression)
+ if (Enabled &&
+ (Info.EvalMode == EvalInfo::EM_ConstantExpression ||
+ Info.EvalMode == EvalInfo::EM_ConstantExpressionUnevaluated))
Info.EvalMode = EvalInfo::EM_ConstantFold;
}
void keepDiagnostics() { Enabled = false; }
@@ -1141,7 +1166,7 @@
/// Should this call expression be treated as a string literal?
static bool IsStringLiteralCall(const CallExpr *E) {
- unsigned Builtin = E->isBuiltinCall();
+ unsigned Builtin = E->getBuiltinCallee();
return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
}
@@ -1338,8 +1363,7 @@
return false;
}
}
- for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
- I != E; ++I) {
+ for (const auto *I : RD->fields()) {
if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
Value.getStructField(I->getFieldIndex())))
return false;
@@ -1807,9 +1831,8 @@
static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
LValue &LVal,
const IndirectFieldDecl *IFD) {
- for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
- CE = IFD->chain_end(); C != CE; ++C)
- if (!HandleLValueMember(Info, E, LVal, cast<FieldDecl>(*C)))
+ for (const auto *C : IFD->chain())
+ if (!HandleLValueMember(Info, E, LVal, cast<FieldDecl>(C)))
return false;
return true;
}
@@ -3092,14 +3115,18 @@
Result.set(VD, Info.CurrentCall->Index);
APValue &Val = Info.CurrentCall->createTemporary(VD, true);
- if (!VD->getInit()) {
+ const Expr *InitE = VD->getInit();
+ if (!InitE) {
Info.Diag(D->getLocStart(), diag::note_constexpr_uninitialized)
<< false << VD->getType();
Val = APValue();
return false;
}
- if (!EvaluateInPlace(Val, Info, Result, VD->getInit())) {
+ if (InitE->isValueDependent())
+ return false;
+
+ if (!EvaluateInPlace(Val, Info, Result, InitE)) {
// Wipe out any partially-computed value, to allow tracking that this
// evaluation failed.
Val = APValue();
@@ -3291,13 +3318,12 @@
case Stmt::DeclStmtClass: {
const DeclStmt *DS = cast<DeclStmt>(S);
- for (DeclStmt::const_decl_iterator DclIt = DS->decl_begin(),
- DclEnd = DS->decl_end(); DclIt != DclEnd; ++DclIt) {
+ for (const auto *DclIt : DS->decls()) {
// Each declaration initialization is its own full-expression.
// FIXME: This isn't quite right; if we're performing aggregate
// initialization, each braced subexpression is its own full-expression.
FullExpressionRAII Scope(Info);
- if (!EvaluateDecl(Info, *DclIt) && !Info.keepEvaluatingAfterFailure())
+ if (!EvaluateDecl(Info, DclIt) && !Info.keepEvaluatingAfterFailure())
return ESR_Failed;
}
return ESR_Succeeded;
@@ -3315,9 +3341,8 @@
BlockScopeRAII Scope(Info);
const CompoundStmt *CS = cast<CompoundStmt>(S);
- for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
- BE = CS->body_end(); BI != BE; ++BI) {
- EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI, Case);
+ for (const auto *BI : CS->body()) {
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, BI, Case);
if (ESR == ESR_Succeeded)
Case = 0;
else if (ESR != ESR_CaseNotFound)
@@ -3593,7 +3618,7 @@
EvalStmtResult ESR = EvaluateStmt(Result, Info, Body);
if (ESR == ESR_Succeeded) {
- if (Callee->getResultType()->isVoidType())
+ if (Callee->getReturnType()->isVoidType())
return true;
Info.Diag(Callee->getLocEnd(), diag::note_constexpr_no_return);
}
@@ -3659,15 +3684,14 @@
#ifndef NDEBUG
CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin();
#endif
- for (CXXConstructorDecl::init_const_iterator I = Definition->init_begin(),
- E = Definition->init_end(); I != E; ++I) {
+ for (const auto *I : Definition->inits()) {
LValue Subobject = This;
APValue *Value = &Result;
// Determine the subobject to initialize.
FieldDecl *FD = 0;
- if ((*I)->isBaseInitializer()) {
- QualType BaseType((*I)->getBaseClass(), 0);
+ if (I->isBaseInitializer()) {
+ QualType BaseType(I->getBaseClass(), 0);
#ifndef NDEBUG
// Non-virtual base classes are initialized in the order in the class
// definition. We have already checked for virtual base classes.
@@ -3676,12 +3700,12 @@
"base class initializers not in expected order");
++BaseIt;
#endif
- if (!HandleLValueDirectBase(Info, (*I)->getInit(), Subobject, RD,
+ if (!HandleLValueDirectBase(Info, I->getInit(), Subobject, RD,
BaseType->getAsCXXRecordDecl(), &Layout))
return false;
Value = &Result.getStructBase(BasesSeen++);
- } else if ((FD = (*I)->getMember())) {
- if (!HandleLValueMember(Info, (*I)->getInit(), Subobject, FD, &Layout))
+ } else if ((FD = I->getMember())) {
+ if (!HandleLValueMember(Info, I->getInit(), Subobject, FD, &Layout))
return false;
if (RD->isUnion()) {
Result = APValue(FD);
@@ -3689,13 +3713,11 @@
} else {
Value = &Result.getStructField(FD->getFieldIndex());
}
- } else if (IndirectFieldDecl *IFD = (*I)->getIndirectMember()) {
+ } else if (IndirectFieldDecl *IFD = I->getIndirectMember()) {
// Walk the indirect field decl's chain to find the object to initialize,
// and make sure we've initialized every step along it.
- for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
- CE = IFD->chain_end();
- C != CE; ++C) {
- FD = cast<FieldDecl>(*C);
+ for (auto *C : IFD->chain()) {
+ FD = cast<FieldDecl>(C);
CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent());
// Switch the union field if it differs. This happens if we had
// preceding zero-initialization, and we're now initializing a union
@@ -3710,7 +3732,7 @@
*Value = APValue(APValue::UninitStruct(), CD->getNumBases(),
std::distance(CD->field_begin(), CD->field_end()));
}
- if (!HandleLValueMember(Info, (*I)->getInit(), Subobject, FD))
+ if (!HandleLValueMember(Info, I->getInit(), Subobject, FD))
return false;
if (CD->isUnion())
Value = &Value->getUnionValue();
@@ -3722,8 +3744,8 @@
}
FullExpressionRAII InitScope(Info);
- if (!EvaluateInPlace(*Value, Info, Subobject, (*I)->getInit()) ||
- (FD && FD->isBitField() && !truncateBitfieldValue(Info, (*I)->getInit(),
+ if (!EvaluateInPlace(*Value, Info, Subobject, I->getInit()) ||
+ (FD && FD->isBitField() && !truncateBitfieldValue(Info, I->getInit(),
*Value, FD))) {
// If we're checking for a potential constant expression, evaluate all
// initializers even if some of them fail.
@@ -3742,15 +3764,14 @@
//===----------------------------------------------------------------------===//
namespace {
-// FIXME: RetTy is always bool. Remove it.
-template <class Derived, typename RetTy=bool>
+template <class Derived>
class ExprEvaluatorBase
- : public ConstStmtVisitor<Derived, RetTy> {
+ : public ConstStmtVisitor<Derived, bool> {
private:
- RetTy DerivedSuccess(const APValue &V, const Expr *E) {
+ bool DerivedSuccess(const APValue &V, const Expr *E) {
return static_cast<Derived*>(this)->Success(V, E);
}
- RetTy DerivedZeroInitialization(const Expr *E) {
+ bool DerivedZeroInitialization(const Expr *E) {
return static_cast<Derived*>(this)->ZeroInitialization(E);
}
@@ -3795,14 +3816,14 @@
protected:
EvalInfo &Info;
- typedef ConstStmtVisitor<Derived, RetTy> StmtVisitorTy;
+ typedef ConstStmtVisitor<Derived, bool> StmtVisitorTy;
typedef ExprEvaluatorBase ExprEvaluatorBaseTy;
OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
return Info.CCEDiag(E, D);
}
- RetTy ZeroInitialization(const Expr *E) { return Error(E); }
+ bool ZeroInitialization(const Expr *E) { return Error(E); }
public:
ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}
@@ -3819,28 +3840,28 @@
return Error(E, diag::note_invalid_subexpr_in_const_expr);
}
- RetTy VisitStmt(const Stmt *) {
+ bool VisitStmt(const Stmt *) {
llvm_unreachable("Expression evaluator should not be called on stmts");
}
- RetTy VisitExpr(const Expr *E) {
+ bool VisitExpr(const Expr *E) {
return Error(E);
}
- RetTy VisitParenExpr(const ParenExpr *E)
+ bool VisitParenExpr(const ParenExpr *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
- RetTy VisitUnaryExtension(const UnaryOperator *E)
+ bool VisitUnaryExtension(const UnaryOperator *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
- RetTy VisitUnaryPlus(const UnaryOperator *E)
+ bool VisitUnaryPlus(const UnaryOperator *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
- RetTy VisitChooseExpr(const ChooseExpr *E)
+ bool VisitChooseExpr(const ChooseExpr *E)
{ return StmtVisitorTy::Visit(E->getChosenSubExpr()); }
- RetTy VisitGenericSelectionExpr(const GenericSelectionExpr *E)
+ bool VisitGenericSelectionExpr(const GenericSelectionExpr *E)
{ return StmtVisitorTy::Visit(E->getResultExpr()); }
- RetTy VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
+ bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
{ return StmtVisitorTy::Visit(E->getReplacement()); }
- RetTy VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E)
+ bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E)
{ return StmtVisitorTy::Visit(E->getExpr()); }
- RetTy VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
+ bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) {
// The initializer may not have been parsed yet, or might be erroneous.
if (!E->getExpr())
return Error(E);
@@ -3848,19 +3869,19 @@
}
// We cannot create any objects for which cleanups are required, so there is
// nothing to do here; all cleanups must come from unevaluated subexpressions.
- RetTy VisitExprWithCleanups(const ExprWithCleanups *E)
+ bool VisitExprWithCleanups(const ExprWithCleanups *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
- RetTy VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
+ bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
CCEDiag(E, diag::note_constexpr_invalid_cast) << 0;
return static_cast<Derived*>(this)->VisitCastExpr(E);
}
- RetTy VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
+ bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
return static_cast<Derived*>(this)->VisitCastExpr(E);
}
- RetTy VisitBinaryOperator(const BinaryOperator *E) {
+ bool VisitBinaryOperator(const BinaryOperator *E) {
switch (E->getOpcode()) {
default:
return Error(E);
@@ -3882,7 +3903,7 @@
}
}
- RetTy VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
+ bool VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
// Evaluate and cache the common expression. We treat it as a temporary,
// even though it's not quite the same thing.
if (!Evaluate(Info.CurrentCall->createTemporary(E->getOpaqueValue(), false),
@@ -3892,7 +3913,7 @@
return HandleConditionalOperator(E);
}
- RetTy VisitConditionalOperator(const ConditionalOperator *E) {
+ bool VisitConditionalOperator(const ConditionalOperator *E) {
bool IsBcpCall = false;
// If the condition (ignoring parens) is a __builtin_constant_p call,
// the result is a constant expression if it can be folded without
@@ -3900,7 +3921,7 @@
// for discussion.
if (const CallExpr *CallCE =
dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts()))
- if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p)
+ if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
IsBcpCall = true;
// Always assume __builtin_constant_p(...) ? ... : ... is a potential
@@ -3917,7 +3938,7 @@
return true;
}
- RetTy VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+ bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
if (APValue *Value = Info.CurrentCall->getTemporary(E))
return DerivedSuccess(*Value, E);
@@ -3931,7 +3952,7 @@
return StmtVisitorTy::Visit(Source);
}
- RetTy VisitCallExpr(const CallExpr *E) {
+ bool VisitCallExpr(const CallExpr *E) {
const Expr *Callee = E->getCallee()->IgnoreParens();
QualType CalleeType = Callee->getType();
@@ -4016,28 +4037,28 @@
return DerivedSuccess(Result, E);
}
- RetTy VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
return StmtVisitorTy::Visit(E->getInitializer());
}
- RetTy VisitInitListExpr(const InitListExpr *E) {
+ bool VisitInitListExpr(const InitListExpr *E) {
if (E->getNumInits() == 0)
return DerivedZeroInitialization(E);
if (E->getNumInits() == 1)
return StmtVisitorTy::Visit(E->getInit(0));
return Error(E);
}
- RetTy VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
return DerivedZeroInitialization(E);
}
- RetTy VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
+ bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
return DerivedZeroInitialization(E);
}
- RetTy VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
return DerivedZeroInitialization(E);
}
/// A member expression where the object is a prvalue is itself a prvalue.
- RetTy VisitMemberExpr(const MemberExpr *E) {
+ bool VisitMemberExpr(const MemberExpr *E) {
assert(!E->isArrow() && "missing call to bound member function?");
APValue Val;
@@ -4061,7 +4082,7 @@
DerivedSuccess(Result, E);
}
- RetTy VisitCastExpr(const CastExpr *E) {
+ bool VisitCastExpr(const CastExpr *E) {
switch (E->getCastKind()) {
default:
break;
@@ -4093,13 +4114,13 @@
return Error(E);
}
- RetTy VisitUnaryPostInc(const UnaryOperator *UO) {
+ bool VisitUnaryPostInc(const UnaryOperator *UO) {
return VisitUnaryPostIncDec(UO);
}
- RetTy VisitUnaryPostDec(const UnaryOperator *UO) {
+ bool VisitUnaryPostDec(const UnaryOperator *UO) {
return VisitUnaryPostIncDec(UO);
}
- RetTy VisitUnaryPostIncDec(const UnaryOperator *UO) {
+ bool VisitUnaryPostIncDec(const UnaryOperator *UO) {
if (!Info.getLangOpts().CPlusPlus1y && !Info.keepEvaluatingAfterFailure())
return Error(UO);
@@ -4113,7 +4134,7 @@
return DerivedSuccess(RVal, UO);
}
- RetTy VisitStmtExpr(const StmtExpr *E) {
+ bool VisitStmtExpr(const StmtExpr *E) {
// We will have checked the full-expressions inside the statement expression
// when they were completed, and don't need to check them again now.
if (Info.checkingForOverflow())
@@ -4162,11 +4183,11 @@
namespace {
template<class Derived>
class LValueExprEvaluatorBase
- : public ExprEvaluatorBase<Derived, bool> {
+ : public ExprEvaluatorBase<Derived> {
protected:
LValue &Result;
typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
- typedef ExprEvaluatorBase<Derived, bool> ExprEvaluatorBaseTy;
+ typedef ExprEvaluatorBase<Derived> ExprEvaluatorBaseTy;
bool Success(APValue::LValueBase B) {
Result.set(B);
@@ -4584,7 +4605,7 @@
namespace {
class PointerExprEvaluator
- : public ExprEvaluatorBase<PointerExprEvaluator, bool> {
+ : public ExprEvaluatorBase<PointerExprEvaluator> {
LValue &Result;
bool Success(const Expr *E) {
@@ -4769,7 +4790,7 @@
if (IsStringLiteralCall(E))
return Success(E);
- switch (E->isBuiltinCall()) {
+ switch (E->getBuiltinCallee()) {
case Builtin::BI__builtin_addressof:
return EvaluateLValue(E->getArg(0), Result, Info);
@@ -4784,7 +4805,7 @@
namespace {
class MemberPointerExprEvaluator
- : public ExprEvaluatorBase<MemberPointerExprEvaluator, bool> {
+ : public ExprEvaluatorBase<MemberPointerExprEvaluator> {
MemberPtr &Result;
bool Success(const ValueDecl *D) {
@@ -4872,7 +4893,7 @@
namespace {
class RecordExprEvaluator
- : public ExprEvaluatorBase<RecordExprEvaluator, bool> {
+ : public ExprEvaluatorBase<RecordExprEvaluator> {
const LValue &This;
APValue &Result;
public:
@@ -4925,14 +4946,13 @@
}
}
- for (RecordDecl::field_iterator I = RD->field_begin(), End = RD->field_end();
- I != End; ++I) {
+ for (const auto *I : RD->fields()) {
// -- if T is a reference type, no initialization is performed.
if (I->getType()->isReferenceType())
continue;
LValue Subobject = This;
- if (!HandleLValueMember(Info, E, Subobject, *I, &Layout))
+ if (!HandleLValueMember(Info, E, Subobject, I, &Layout))
return false;
ImplicitValueInitExpr VIE(I->getType());
@@ -5040,8 +5060,7 @@
std::distance(RD->field_begin(), RD->field_end()));
unsigned ElementNo = 0;
bool Success = true;
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) {
+ for (const auto *Field : RD->fields()) {
// Anonymous bit-fields are not considered members of the class for
// purposes of aggregate initialization.
if (Field->isUnnamedBitfield())
@@ -5054,7 +5073,7 @@
// FIXME: Diagnostics here should point to the end of the initializer
// list, not the start.
if (!HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E,
- Subobject, *Field, &Layout))
+ Subobject, Field, &Layout))
return false;
// Perform an implicit value-initialization for members beyond the end of
@@ -5069,7 +5088,7 @@
APValue &FieldVal = Result.getStructField(Field->getFieldIndex());
if (!EvaluateInPlace(FieldVal, Info, Subobject, Init) ||
(Field->isBitField() && !truncateBitfieldValue(Info, Init,
- FieldVal, *Field))) {
+ FieldVal, Field))) {
if (!Info.keepEvaluatingAfterFailure())
return false;
Success = false;
@@ -5089,16 +5108,15 @@
if (!Result.isUninit())
return true;
- if (ZeroInit)
- return ZeroInitialization(E);
-
- const CXXRecordDecl *RD = FD->getParent();
- if (RD->isUnion())
- Result = APValue((FieldDecl*)0);
- else
- Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
- std::distance(RD->field_begin(), RD->field_end()));
- return true;
+ // We can get here in two different ways:
+ // 1) We're performing value-initialization, and should zero-initialize
+ // the object, or
+ // 2) We're performing default-initialization of an object with a trivial
+ // constexpr default constructor, in which case we should start the
+ // lifetimes of all the base subobjects (there can be no data member
+ // subobjects in this case) per [basic.life]p1.
+ // Either way, ZeroInitialization is appropriate.
+ return ZeroInitialization(E);
}
const FunctionDecl *Definition = 0;
@@ -5235,7 +5253,7 @@
namespace {
class VectorExprEvaluator
- : public ExprEvaluatorBase<VectorExprEvaluator, bool> {
+ : public ExprEvaluatorBase<VectorExprEvaluator> {
APValue &Result;
public:
@@ -5416,7 +5434,7 @@
namespace {
class ArrayExprEvaluator
- : public ExprEvaluatorBase<ArrayExprEvaluator, bool> {
+ : public ExprEvaluatorBase<ArrayExprEvaluator> {
const LValue &This;
APValue &Result;
public:
@@ -5578,19 +5596,9 @@
if (HadZeroInit)
return true;
- if (ZeroInit) {
- ImplicitValueInitExpr VIE(Type);
- return EvaluateInPlace(*Value, Info, Subobject, &VIE);
- }
-
- const CXXRecordDecl *RD = FD->getParent();
- if (RD->isUnion())
- *Value = APValue((FieldDecl*)0);
- else
- *Value =
- APValue(APValue::UninitStruct(), RD->getNumBases(),
- std::distance(RD->field_begin(), RD->field_end()));
- return true;
+ // See RecordExprEvaluator::VisitCXXConstructExpr for explanation.
+ ImplicitValueInitExpr VIE(Type);
+ return EvaluateInPlace(*Value, Info, Subobject, &VIE);
}
const FunctionDecl *Definition = 0;
@@ -5621,7 +5629,7 @@
namespace {
class IntExprEvaluator
- : public ExprEvaluatorBase<IntExprEvaluator, bool> {
+ : public ExprEvaluatorBase<IntExprEvaluator> {
APValue &Result;
public:
IntExprEvaluator(EvalInfo &info, APValue &result)
@@ -5727,14 +5735,6 @@
return ZeroInitialization(E);
}
- bool VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
- return Success(E->getValue(), E);
- }
-
- bool VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
- return Success(E->getValue(), E);
- }
-
bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
return Success(E->getValue(), E);
}
@@ -5975,7 +5975,7 @@
}
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
- switch (unsigned BuiltinOp = E->isBuiltinCall()) {
+ switch (unsigned BuiltinOp = E->getBuiltinCallee()) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -5994,7 +5994,17 @@
// Expression had no side effects, but we couldn't statically determine the
// size of the referenced object.
- return Error(E);
+ switch (Info.EvalMode) {
+ case EvalInfo::EM_ConstantExpression:
+ case EvalInfo::EM_PotentialConstantExpression:
+ case EvalInfo::EM_ConstantFold:
+ case EvalInfo::EM_EvaluateForOverflow:
+ case EvalInfo::EM_IgnoreSideEffects:
+ return Error(E);
+ case EvalInfo::EM_ConstantExpressionUnevaluated:
+ case EvalInfo::EM_PotentialConstantExpressionUnevaluated:
+ return Success(-1ULL, E);
+ }
}
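
In the two new *Unevaluated modes, an undeterminable __builtin_object_size no longer poisons the whole evaluation; it folds to the builtin's documented "unknown" result for type 0, which is what Success(-1ULL, E) encodes. In the ordinary modes the Error path is unchanged. Placeholder sketch of the value involved:

    // For an arbitrary pointer whose pointee cannot be identified,
    // a type-0 query reports (size_t)-1, i.e. "unknown".
    size_t objectSize(void *p) { return __builtin_object_size(p, 0); }   // -> (size_t)-1
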
case Builtin::BI__builtin_bswap16:
@@ -7120,6 +7130,7 @@
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
case CK_NonAtomicToAtomic:
+ case CK_AddressSpaceConversion:
llvm_unreachable("invalid cast kind for integral value");
case CK_BitCast:
@@ -7258,7 +7269,7 @@
namespace {
class FloatExprEvaluator
- : public ExprEvaluatorBase<FloatExprEvaluator, bool> {
+ : public ExprEvaluatorBase<FloatExprEvaluator> {
APFloat &Result;
public:
FloatExprEvaluator(EvalInfo &info, APFloat &result)
@@ -7319,7 +7330,7 @@
}
bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
- switch (E->isBuiltinCall()) {
+ switch (E->getBuiltinCallee()) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -7474,7 +7485,7 @@
namespace {
class ComplexExprEvaluator
- : public ExprEvaluatorBase<ComplexExprEvaluator, bool> {
+ : public ExprEvaluatorBase<ComplexExprEvaluator> {
ComplexValue &Result;
public:
@@ -7592,6 +7603,7 @@
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
case CK_NonAtomicToAtomic:
+ case CK_AddressSpaceConversion:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
@@ -7858,7 +7870,7 @@
namespace {
class AtomicExprEvaluator :
- public ExprEvaluatorBase<AtomicExprEvaluator, bool> {
+ public ExprEvaluatorBase<AtomicExprEvaluator> {
APValue &Result;
public:
AtomicExprEvaluator(EvalInfo &Info, APValue &Result)
@@ -7898,7 +7910,7 @@
namespace {
class VoidExprEvaluator
- : public ExprEvaluatorBase<VoidExprEvaluator, bool> {
+ : public ExprEvaluatorBase<VoidExprEvaluator> {
public:
VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}
@@ -8000,6 +8012,8 @@
/// an object can indirectly refer to subobjects which were initialized earlier.
static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
const Expr *E, bool AllowNonLiteralTypes) {
+ assert(!E->isValueDependent());
+
if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, &This))
return false;
@@ -8019,6 +8033,9 @@
/// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
/// lvalue-to-rvalue cast if it is an lvalue.
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
+ if (E->getType().isNull())
+ return false;
+
if (!CheckLiteralType(Info, E))
return false;
@@ -8046,6 +8063,13 @@
IsConst = true;
return true;
}
+
+ // This case should be rare, but we need to check it before we check on
+ // the type below.
+ if (Exp->getType().isNull()) {
+ IsConst = false;
+ return true;
+ }
// FIXME: Evaluating values of large array and record types can cause
// performance problems. Only do so in C++11 for now.
@@ -8303,10 +8327,20 @@
case Expr::MaterializeTemporaryExprClass:
case Expr::PseudoObjectExprClass:
case Expr::AtomicExprClass:
- case Expr::InitListExprClass:
case Expr::LambdaExprClass:
return ICEDiag(IK_NotICE, E->getLocStart());
+ case Expr::InitListExprClass: {
+ // C++03 [dcl.init]p13: If T is a scalar type, then a declaration of the
+ // form "T x = { a };" is equivalent to "T x = a;".
+ // Unless we're initializing a reference, T is a scalar as it is known to be
+ // of integral or enumeration type.
+ if (E->isRValue())
+ if (cast<InitListExpr>(E)->getNumInits() == 1)
+ return CheckICE(cast<InitListExpr>(E)->getInit(0), Ctx);
+ return ICEDiag(IK_NotICE, E->getLocStart());
+ }
+
case Expr::SizeOfPackExprClass:
case Expr::GNUNullExprClass:
// GCC considers the GNU __null value to be an integral constant expression.
@@ -8325,8 +8359,6 @@
case Expr::ObjCBoolLiteralExprClass:
case Expr::CXXBoolLiteralExprClass:
case Expr::CXXScalarValueInitExprClass:
- case Expr::UnaryTypeTraitExprClass:
- case Expr::BinaryTypeTraitExprClass:
case Expr::TypeTraitExprClass:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
@@ -8338,7 +8370,7 @@
// constant expressions, but they can never be ICEs because an ICE cannot
// contain an operand of (pointer to) function type.
const CallExpr *CE = cast<CallExpr>(E);
- if (CE->isBuiltinCall())
+ if (CE->getBuiltinCallee())
return CheckEvalInICE(E, Ctx);
return ICEDiag(IK_NotICE, E->getLocStart());
}
@@ -8555,7 +8587,7 @@
// extension. See GCC PR38377 for discussion.
if (const CallExpr *CallCE
= dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
- if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p)
+ if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
return CheckEvalInICE(E, Ctx);
ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
if (CondResult.Kind == IK_NotICE)
@@ -8665,6 +8697,28 @@
return IsConstExpr;
}
+bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
+ const FunctionDecl *Callee,
+ llvm::ArrayRef<const Expr*> Args) const {
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpressionUnevaluated);
+
+ ArgVector ArgValues(Args.size());
+ for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
+ I != E; ++I) {
+ if (!Evaluate(ArgValues[I - Args.begin()], Info, *I))
+ // If evaluation fails, throw away the argument entirely.
+ ArgValues[I - Args.begin()] = APValue();
+ if (Info.EvalStatus.HasSideEffects)
+ return false;
+ }
+
+ // Build fake call to Callee.
+ CallStackFrame Frame(Info, Callee->getLocation(), Callee, /*This*/0,
+ ArgValues.data());
+ return Evaluate(Value, Info, this) && !Info.EvalStatus.HasSideEffects;
+}
+
bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
SmallVectorImpl<
PartialDiagnosticAt> &Diags) {
@@ -8705,3 +8759,27 @@
return Diags.empty();
}
+
+bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
+ const FunctionDecl *FD,
+ SmallVectorImpl<
+ PartialDiagnosticAt> &Diags) {
+ Expr::EvalStatus Status;
+ Status.Diag = &Diags;
+
+ EvalInfo Info(FD->getASTContext(), Status,
+ EvalInfo::EM_PotentialConstantExpressionUnevaluated);
+
+ // Fabricate a call stack frame to give the arguments a plausible cover story.
+ ArrayRef<const Expr*> Args;
+ ArgVector ArgValues(0);
+ bool Success = EvaluateArgs(Args, ArgValues, Info);
+ (void)Success;
+ assert(Success &&
+ "Failed to set up arguments for potential constant evaluation");
+ CallStackFrame Frame(Info, SourceLocation(), FD, 0, ArgValues.data());
+
+ APValue ResultScratch;
+ Evaluate(ResultScratch, Info, E);
+ return Diags.empty();
+}
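A minimal illustration (user code, not part of the patch) of the InitListExpr case added to CheckICE above: per C++03 [dcl.init]p13 a one-element braced initializer of a scalar behaves like a plain initializer, so the braced form no longer disqualifies 'n' from being an integral constant expression.

  const int n = { 42 };   // equivalent to: const int n = 42;
  char buffer[n];         // n is still usable where an integral constant expression is required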
diff --git a/lib/AST/InheritViz.cpp b/lib/AST/InheritViz.cpp
index 3d64310..84cc167 100644
--- a/lib/AST/InheritViz.cpp
+++ b/lib/AST/InheritViz.cpp
@@ -93,26 +93,25 @@
// Display the base classes.
const CXXRecordDecl *Decl
= static_cast<const CXXRecordDecl *>(Type->getAs<RecordType>()->getDecl());
- for (CXXRecordDecl::base_class_const_iterator Base = Decl->bases_begin();
- Base != Decl->bases_end(); ++Base) {
- QualType CanonBaseType = Context.getCanonicalType(Base->getType());
+ for (const auto &Base : Decl->bases()) {
+ QualType CanonBaseType = Context.getCanonicalType(Base.getType());
// If this is not virtual inheritance, bump the direct base
// count for the type.
- if (!Base->isVirtual())
+ if (!Base.isVirtual())
++DirectBaseCount[CanonBaseType];
// Write out the node (if we need to).
- WriteNode(Base->getType(), Base->isVirtual());
+ WriteNode(Base.getType(), Base.isVirtual());
// Write out the edge.
Out << " ";
WriteNodeReference(Type, FromVirtual);
Out << " -> ";
- WriteNodeReference(Base->getType(), Base->isVirtual());
+ WriteNodeReference(Base.getType(), Base.isVirtual());
// Write out edge attributes to show the kind of inheritance.
- if (Base->isVirtual()) {
+ if (Base.isVirtual()) {
Out << " [ style=\"dashed\" ]";
}
Out << ";";
diff --git a/lib/AST/ItaniumCXXABI.cpp b/lib/AST/ItaniumCXXABI.cpp
index 5784660..ffa2ddc 100644
--- a/lib/AST/ItaniumCXXABI.cpp
+++ b/lib/AST/ItaniumCXXABI.cpp
@@ -33,12 +33,17 @@
/// literals within a particular context.
class ItaniumNumberingContext : public MangleNumberingContext {
llvm::DenseMap<IdentifierInfo*, unsigned> VarManglingNumbers;
+ llvm::DenseMap<IdentifierInfo*, unsigned> TagManglingNumbers;
public:
/// Variable decls are numbered by identifier.
- virtual unsigned getManglingNumber(const VarDecl *VD) {
+ unsigned getManglingNumber(const VarDecl *VD, unsigned) override {
return ++VarManglingNumbers[VD->getIdentifier()];
}
+
+ unsigned getManglingNumber(const TagDecl *TD, unsigned) override {
+ return ++TagManglingNumbers[TD->getIdentifier()];
+ }
};
class ItaniumCXXABI : public CXXABI {
@@ -48,7 +53,7 @@
ItaniumCXXABI(ASTContext &Ctx) : Context(Ctx) { }
std::pair<uint64_t, unsigned>
- getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const {
+ getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const override {
const TargetInfo &Target = Context.getTargetInfo();
TargetInfo::IntType PtrDiff = Target.getPtrDiffType(0);
uint64_t Width = Target.getTypeWidth(PtrDiff);
@@ -58,13 +63,17 @@
return std::make_pair(Width, Align);
}
- CallingConv getDefaultMethodCallConv(bool isVariadic) const {
+ CallingConv getDefaultMethodCallConv(bool isVariadic) const override {
+ const llvm::Triple &T = Context.getTargetInfo().getTriple();
+ if (!isVariadic && T.isWindowsGNUEnvironment() &&
+ T.getArch() == llvm::Triple::x86)
+ return CC_X86ThisCall;
return CC_C;
}
// We cheat and just check that the class has a vtable pointer, and that it's
// only big enough to have a vtable pointer and nothing more (or less).
- bool isNearlyEmpty(const CXXRecordDecl *RD) const {
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const override {
// Check that the class has a vtable pointer.
if (!RD->isDynamicClass())
@@ -76,7 +85,7 @@
return Layout.getNonVirtualSize() == PointerSize;
}
- virtual MangleNumberingContext *createMangleNumberingContext() const {
+ MangleNumberingContext *createMangleNumberingContext() const override {
return new ItaniumNumberingContext();
}
};
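An illustration of the getDefaultMethodCallConv change above, assuming an i686 windows-gnu (MinGW) target; the code is ordinary user C++, not part of the patch:

  struct S {
    void f(int);        // non-variadic method: now defaults to __thiscall
    void g(int, ...);   // variadic method: keeps the C calling convention
  };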
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index 0621d7b..27ba2be 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -11,7 +11,7 @@
// which is used in GCC 3.2 and newer (and many compilers that are
// ABI-compatible with GCC):
//
-// http://www.codesourcery.com/public/cxx-abi/abi.html
+// http://mentorembedded.github.io/cxx-abi/abi.html#mangling
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Mangle.h"
@@ -21,6 +21,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/TypeLoc.h"
@@ -101,11 +102,18 @@
const FunctionDecl *fn = dyn_cast_or_null<FunctionDecl>(decl);
return (fn ? getStructor(fn) : decl);
}
-
+
+static bool isLambda(const NamedDecl *ND) {
+ const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
+ if (!Record)
+ return false;
+
+ return Record->isLambda();
+}
+
static const unsigned UnknownArity = ~0U;
class ItaniumMangleContextImpl : public ItaniumMangleContext {
- llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
typedef std::pair<const DeclContext*, IdentifierInfo*> DiscriminatorKeyTy;
llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
@@ -115,52 +123,46 @@
DiagnosticsEngine &Diags)
: ItaniumMangleContext(Context, Diags) {}
- uint64_t getAnonymousStructId(const TagDecl *TD) {
- std::pair<llvm::DenseMap<const TagDecl *,
- uint64_t>::iterator, bool> Result =
- AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
- return Result.first->second;
- }
-
/// @name Mangler Entry Points
/// @{
- bool shouldMangleCXXName(const NamedDecl *D);
- void mangleCXXName(const NamedDecl *D, raw_ostream &);
- void mangleThunk(const CXXMethodDecl *MD,
- const ThunkInfo &Thunk,
- raw_ostream &);
+ bool shouldMangleCXXName(const NamedDecl *D) override;
+ bool shouldMangleStringLiteral(const StringLiteral *) override {
+ return false;
+ }
+ void mangleCXXName(const NamedDecl *D, raw_ostream &) override;
+ void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
+ raw_ostream &) override;
void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
const ThisAdjustment &ThisAdjustment,
- raw_ostream &);
- void mangleReferenceTemporary(const VarDecl *D,
- raw_ostream &);
- void mangleCXXVTable(const CXXRecordDecl *RD,
- raw_ostream &);
- void mangleCXXVTT(const CXXRecordDecl *RD,
- raw_ostream &);
+ raw_ostream &) override;
+ void mangleReferenceTemporary(const VarDecl *D, raw_ostream &) override;
+ void mangleCXXVTable(const CXXRecordDecl *RD, raw_ostream &) override;
+ void mangleCXXVTT(const CXXRecordDecl *RD, raw_ostream &) override;
void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
- const CXXRecordDecl *Type,
- raw_ostream &);
- void mangleCXXRTTI(QualType T, raw_ostream &);
- void mangleCXXRTTIName(QualType T, raw_ostream &);
- void mangleTypeName(QualType T, raw_ostream &);
+ const CXXRecordDecl *Type, raw_ostream &) override;
+ void mangleCXXRTTI(QualType T, raw_ostream &) override;
+ void mangleCXXRTTIName(QualType T, raw_ostream &) override;
+ void mangleTypeName(QualType T, raw_ostream &) override;
void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- raw_ostream &);
+ raw_ostream &) override;
void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- raw_ostream &);
+ raw_ostream &) override;
- void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &);
- void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out);
- void mangleDynamicAtExitDestructor(const VarDecl *D, raw_ostream &Out);
- void mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &);
- void mangleItaniumThreadLocalWrapper(const VarDecl *D, raw_ostream &);
+ void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &) override;
+ void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
+ void mangleDynamicAtExitDestructor(const VarDecl *D,
+ raw_ostream &Out) override;
+ void mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &) override;
+ void mangleItaniumThreadLocalWrapper(const VarDecl *D,
+ raw_ostream &) override;
+
+ void mangleStringLiteral(const StringLiteral *, raw_ostream &) override;
bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
// Lambda closure types are already numbered.
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(ND))
- if (RD->isLambda())
- return false;
+ if (isLambda(ND))
+ return false;
// Anonymous tags are already numbered.
if (const TagDecl *Tag = dyn_cast<TagDecl>(ND)) {
@@ -538,14 +540,6 @@
return 0;
}
-static bool isLambda(const NamedDecl *ND) {
- const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
- if (!Record)
- return false;
-
- return Record->isLambda();
-}
-
void CXXNameMangler::mangleName(const NamedDecl *ND) {
// <name> ::= <nested-name>
// ::= <unscoped-name>
@@ -833,6 +827,7 @@
switch (type->getTypeClass()) {
case Type::Builtin:
case Type::Complex:
+ case Type::Adjusted:
case Type::Decayed:
case Type::Pointer:
case Type::BlockPointer:
@@ -1032,10 +1027,9 @@
assert(RD->isAnonymousStructOrUnion() &&
"Expected anonymous struct or union!");
- for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
- I != E; ++I) {
+ for (const auto *I : RD->fields()) {
if (I->getIdentifier())
- return *I;
+ return I;
if (const RecordType *RT = I->getType()->getAs<RecordType>())
if (const FieldDecl *NamedDataMember =
@@ -1148,7 +1142,7 @@
}
// Get a unique id for the anonymous struct.
- uint64_t AnonStructId = Context.getAnonymousStructId(TD);
+ unsigned AnonStructId = Context.getAnonymousStructId(TD);
// Mangle it as a source name in the form
// [n] $_<id>
@@ -1919,7 +1913,7 @@
// ::= x # long long, __int64
// ::= y # unsigned long long, __int64
// ::= n # __int128
- // UNSUPPORTED: ::= o # unsigned __int128
+ // ::= o # unsigned __int128
// ::= f # float
// ::= d # double
// ::= e # long double, __float80
@@ -2012,11 +2006,11 @@
// <bare-function-type> ::= <signature type>+
if (MangleReturnType) {
FunctionTypeDepth.enterResultType();
- mangleType(Proto->getResultType());
+ mangleType(Proto->getReturnType());
FunctionTypeDepth.leaveResultType();
}
- if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
// <builtin-type> ::= v # void
Out << 'v';
@@ -2024,10 +2018,8 @@
return;
}
- for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
- ArgEnd = Proto->arg_type_end();
- Arg != ArgEnd; ++Arg)
- mangleType(Context.getASTContext().getSignatureParameterType(*Arg));
+ for (const auto &Arg : Proto->param_types())
+ mangleType(Context.getASTContext().getSignatureParameterType(Arg));
FunctionTypeDepth.pop(saved);
@@ -2161,8 +2153,17 @@
const char *EltName = 0;
if (T->getVectorKind() == VectorType::NeonPolyVector) {
switch (cast<BuiltinType>(EltType)->getKind()) {
- case BuiltinType::SChar: EltName = "poly8_t"; break;
- case BuiltinType::Short: EltName = "poly16_t"; break;
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ EltName = "poly8_t";
+ break;
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ EltName = "poly16_t";
+ break;
+ case BuiltinType::ULongLong:
+ EltName = "poly64_t";
+ break;
default: llvm_unreachable("unexpected Neon polynomial vector element type");
}
} else {
@@ -2175,6 +2176,7 @@
case BuiltinType::UInt: EltName = "uint32_t"; break;
case BuiltinType::LongLong: EltName = "int64_t"; break;
case BuiltinType::ULongLong: EltName = "uint64_t"; break;
+ case BuiltinType::Double: EltName = "float64_t"; break;
case BuiltinType::Float: EltName = "float32_t"; break;
case BuiltinType::Half: EltName = "float16_t";break;
default:
@@ -2202,6 +2204,7 @@
return "Int16";
case BuiltinType::Int:
return "Int32";
+ case BuiltinType::Long:
case BuiltinType::LongLong:
return "Int64";
case BuiltinType::UChar:
@@ -2210,6 +2213,7 @@
return "Uint16";
case BuiltinType::UInt:
return "Uint32";
+ case BuiltinType::ULong:
case BuiltinType::ULongLong:
return "Uint64";
case BuiltinType::Half:
@@ -2245,7 +2249,7 @@
case BuiltinType::UShort:
EltName = "Poly16";
break;
- case BuiltinType::ULongLong:
+ case BuiltinType::ULong:
EltName = "Poly64";
break;
default:
@@ -2270,8 +2274,12 @@
void CXXNameMangler::mangleType(const VectorType *T) {
if ((T->getVectorKind() == VectorType::NeonVector ||
T->getVectorKind() == VectorType::NeonPolyVector)) {
- if (getASTContext().getTargetInfo().getTriple().getArch() ==
- llvm::Triple::aarch64)
+ llvm::Triple Target = getASTContext().getTargetInfo().getTriple();
+ llvm::Triple::ArchType Arch =
+ getASTContext().getTargetInfo().getTriple().getArch();
+ if (Arch == llvm::Triple::aarch64 ||
+ Arch == llvm::Triple::aarch64_be ||
+ (Arch == llvm::Triple::arm64 && !Target.isOSDarwin()))
mangleAArch64NeonVectorType(T);
else
mangleNeonVectorType(T);
@@ -2311,9 +2319,8 @@
SmallString<64> QualStr;
llvm::raw_svector_ostream QualOS(QualStr);
QualOS << "objcproto";
- ObjCObjectType::qual_iterator i = T->qual_begin(), e = T->qual_end();
- for ( ; i != e; ++i) {
- StringRef name = (*i)->getName();
+ for (const auto *I : T->quals()) {
+ StringRef name = I->getName();
QualOS << name.size() << name;
}
QualOS.flush();
@@ -2438,7 +2445,7 @@
}
void CXXNameMangler::mangleType(const AtomicType *T) {
- // <type> ::= U <source-name> <type> # vendor extended type qualifier
+ // <type> ::= U <source-name> <type> # vendor extended type qualifier
// (Until there's a standardized mangling...)
Out << "U7_Atomic";
mangleType(T->getValueType());
@@ -2581,8 +2588,6 @@
case Expr::ShuffleVectorExprClass:
case Expr::ConvertVectorExprClass:
case Expr::StmtExprClass:
- case Expr::UnaryTypeTraitExprClass:
- case Expr::BinaryTypeTraitExprClass:
case Expr::TypeTraitExprClass:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
@@ -3789,6 +3794,10 @@
mangleCXXRTTIName(Ty, Out);
}
+void ItaniumMangleContextImpl::mangleStringLiteral(const StringLiteral *, raw_ostream &) {
+ llvm_unreachable("Can't mangle string literals");
+}
+
ItaniumMangleContext *
ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
return new ItaniumMangleContextImpl(Context, Diags);
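An illustration of the <builtin-type> letters noted earlier in this file's grammar comment: __int128 mangles as 'n' and unsigned __int128 as 'o', so the declaration below should mangle to roughly _Z1fno.

  void f(__int128, unsigned __int128);   // expected Itanium mangling: _Z1fno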
diff --git a/lib/AST/Mangle.cpp b/lib/AST/Mangle.cpp
index 231ef03..fdc00e3 100644
--- a/lib/AST/Mangle.cpp
+++ b/lib/AST/Mangle.cpp
@@ -11,13 +11,13 @@
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Attr.h"
-#include "clang/AST/Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Mangle.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -64,7 +64,7 @@
static StdOrFastCC getStdOrFastCallMangling(const ASTContext &Context,
const NamedDecl *ND) {
const TargetInfo &TI = Context.getTargetInfo();
- llvm::Triple Triple = TI.getTriple();
+ const llvm::Triple &Triple = TI.getTriple();
if (!Triple.isOSWindows() || Triple.getArch() != llvm::Triple::x86)
return SOF_OTHER;
@@ -163,13 +163,9 @@
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (!MD->isStatic())
++ArgWords;
- for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
- ArgEnd = Proto->arg_type_end();
- Arg != ArgEnd; ++Arg) {
- QualType AT = *Arg;
+ for (const auto &AT : Proto->param_types())
// Size should be aligned to DWORD boundary
ArgWords += llvm::RoundUpToAlignment(ASTContext.getTypeSize(AT), 32) / 32;
- }
Out << 4 * ArgWords;
}
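A worked example for the byte count emitted above, assuming an x86 __stdcall function with C linkage (not code from the patch): int contributes one 4-byte word and double two, so 4 * ArgWords is 12 and the decorated name ends in "@12".

  extern "C" int __stdcall f(int, double);   // decorated roughly as "_f@12"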
@@ -246,7 +242,9 @@
OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
OS << '(' << *CID << ')';
- OS << ' ' << MD->getSelector().getAsString() << ']';
+ OS << ' ';
+ MD->getSelector().print(OS);
+ OS << ']';
Out << OS.str().size() << OS.str();
}
diff --git a/lib/AST/MangleNumberingContext.cpp b/lib/AST/MangleNumberingContext.cpp
index 91ef0e2..b46a085 100644
--- a/lib/AST/MangleNumberingContext.cpp
+++ b/lib/AST/MangleNumberingContext.cpp
@@ -24,7 +24,7 @@
= CallOperator->getType()->getAs<FunctionProtoType>();
ASTContext &Context = CallOperator->getASTContext();
- QualType Key = Context.getFunctionType(Context.VoidTy, Proto->getArgTypes(),
+ QualType Key = Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(),
FunctionProtoType::ExtProtoInfo());
Key = Context.getCanonicalType(Key);
return ++ManglingNumbers[Key->castAs<FunctionProtoType>()];
@@ -38,6 +38,8 @@
}
unsigned
-MangleNumberingContext::getManglingNumber(const TagDecl *TD) {
- return ++TagManglingNumbers[TD->getIdentifier()];
+MangleNumberingContext::getStaticLocalNumber(const VarDecl *VD) {
+ // FIXME: Compute a BlockPointerType? Not obvious how.
+ const Type *Ty = 0;
+ return ++ManglingNumbers[Ty];
}
diff --git a/lib/AST/MicrosoftCXXABI.cpp b/lib/AST/MicrosoftCXXABI.cpp
index 4a93ea1..359e864 100644
--- a/lib/AST/MicrosoftCXXABI.cpp
+++ b/lib/AST/MicrosoftCXXABI.cpp
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
#include "CXXABI.h"
-#include "clang/AST/Attr.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/RecordLayout.h"
@@ -28,15 +28,15 @@
/// \brief Numbers things which need to correspond across multiple TUs.
/// Typically these are things like static locals, lambdas, or blocks.
class MicrosoftNumberingContext : public MangleNumberingContext {
- unsigned NumStaticLocals;
-
public:
- MicrosoftNumberingContext() : NumStaticLocals(0) { }
+ unsigned getManglingNumber(const VarDecl *VD,
+ unsigned MSLocalManglingNumber) override {
+ return MSLocalManglingNumber;
+ }
- /// Static locals are numbered by source order.
- virtual unsigned getManglingNumber(const VarDecl *VD) {
- assert(VD->isStaticLocal());
- return ++NumStaticLocals;
+ unsigned getManglingNumber(const TagDecl *TD,
+ unsigned MSLocalManglingNumber) override {
+ return MSLocalManglingNumber;
}
};
@@ -46,16 +46,16 @@
MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { }
std::pair<uint64_t, unsigned>
- getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const;
+ getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const override;
- CallingConv getDefaultMethodCallConv(bool isVariadic) const {
+ CallingConv getDefaultMethodCallConv(bool isVariadic) const override {
if (!isVariadic &&
Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
return CC_X86ThisCall;
return CC_C;
}
- bool isNearlyEmpty(const CXXRecordDecl *RD) const {
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const override {
// FIXME: Audit the corners
if (!RD->isDynamicClass())
return false;
@@ -69,7 +69,7 @@
Layout.getNonVirtualSize() == PointerSize * 2;
}
- MangleNumberingContext *createMangleNumberingContext() const {
+ MangleNumberingContext *createMangleNumberingContext() const override {
return new MicrosoftNumberingContext();
}
};
@@ -92,26 +92,27 @@
return false;
}
-static MSInheritanceModel MSInheritanceAttrToModel(attr::Kind Kind) {
- switch (Kind) {
- default: llvm_unreachable("expected MS inheritance attribute");
- case attr::SingleInheritance: return MSIM_Single;
- case attr::MultipleInheritance: return MSIM_Multiple;
- case attr::VirtualInheritance: return MSIM_Virtual;
- case attr::UnspecifiedInheritance: return MSIM_Unspecified;
- }
+MSInheritanceAttr::Spelling CXXRecordDecl::calculateInheritanceModel() const {
+ if (!hasDefinition())
+ return MSInheritanceAttr::Keyword_unspecified_inheritance;
+ if (getNumVBases() > 0)
+ return MSInheritanceAttr::Keyword_virtual_inheritance;
+ if (usesMultipleInheritanceModel(this))
+ return MSInheritanceAttr::Keyword_multiple_inheritance;
+ return MSInheritanceAttr::Keyword_single_inheritance;
}
-MSInheritanceModel CXXRecordDecl::getMSInheritanceModel() const {
- if (Attr *IA = this->getAttr<MSInheritanceAttr>())
- return MSInheritanceAttrToModel(IA->getKind());
- // If there was no explicit attribute, the record must be defined already, and
- // we can figure out the inheritance model from its other properties.
- if (this->getNumVBases() > 0)
- return MSIM_Virtual;
- if (usesMultipleInheritanceModel(this))
- return this->isPolymorphic() ? MSIM_MultiplePolymorphic : MSIM_Multiple;
- return this->isPolymorphic() ? MSIM_SinglePolymorphic : MSIM_Single;
+MSInheritanceAttr::Spelling
+CXXRecordDecl::getMSInheritanceModel() const {
+ MSInheritanceAttr *IA = getAttr<MSInheritanceAttr>();
+ assert(IA && "Expected MSInheritanceAttr on the CXXRecordDecl!");
+ return IA->getSemanticSpelling();
+}
+
+MSVtorDispAttr::Mode CXXRecordDecl::getMSVtorDispMode() const {
+ if (MSVtorDispAttr *VDA = getAttr<MSVtorDispAttr>())
+ return VDA->getVtorDispMode();
+ return MSVtorDispAttr::Mode(getASTContext().getLangOpts().VtorDispMode);
}
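User-level classes (illustration only, not from the patch) showing which spelling calculateInheritanceModel() above picks:

  struct A {};
  struct B {};
  struct S1 : A {};          // single_inheritance
  struct M1 : A, B {};       // multiple_inheritance (usesMultipleInheritanceModel)
  struct V1 : virtual A {};  // virtual_inheritance (getNumVBases() > 0)
  struct I1;                 // no definition yet -> unspecified_inheritance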
// Returns the number of pointer and integer slots used to represent a member
@@ -133,49 +134,32 @@
// // offset.
// int NonVirtualBaseAdjustment;
//
+// // The offset of the vb-table pointer within the object. Only needed for
+// // incomplete types.
+// int VBPtrOffset;
+//
// // An offset within the vb-table that selects the virtual base containing
// // the member. Loading from this offset produces a new offset that is
// // added to the address of the vb-table pointer to produce the base.
// int VirtualBaseAdjustmentOffset;
-//
-// // The offset of the vb-table pointer within the object. Only needed for
-// // incomplete types.
-// int VBPtrOffset;
// };
static std::pair<unsigned, unsigned>
getMSMemberPointerSlots(const MemberPointerType *MPT) {
- const CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
- MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
- unsigned Ptrs;
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel();
+ unsigned Ptrs = 0;
unsigned Ints = 0;
- if (MPT->isMemberFunctionPointer()) {
- // Member function pointers are a struct of a function pointer followed by a
- // variable number of ints depending on the inheritance model used. The
- // function pointer is a real function if it is non-virtual and a vftable
- // slot thunk if it is virtual. The ints select the object base passed for
- // the 'this' pointer.
- Ptrs = 1; // First slot is always a function pointer.
- switch (Inheritance) {
- case MSIM_Unspecified: ++Ints; // VBTableOffset
- case MSIM_Virtual: ++Ints; // VirtualBaseAdjustmentOffset
- case MSIM_MultiplePolymorphic:
- case MSIM_Multiple: ++Ints; // NonVirtualBaseAdjustment
- case MSIM_SinglePolymorphic:
- case MSIM_Single: break; // Nothing
- }
- } else {
- // Data pointers are an aggregate of ints. The first int is an offset
- // followed by vbtable-related offsets.
- Ptrs = 0;
- switch (Inheritance) {
- case MSIM_Unspecified: ++Ints; // VBTableOffset
- case MSIM_Virtual: ++Ints; // VirtualBaseAdjustmentOffset
- case MSIM_MultiplePolymorphic:
- case MSIM_Multiple: // Nothing
- case MSIM_SinglePolymorphic:
- case MSIM_Single: ++Ints; // Field offset
- }
- }
+ if (MPT->isMemberFunctionPointer())
+ Ptrs = 1;
+ else
+ Ints = 1;
+ if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(),
+ Inheritance))
+ Ints++;
+ if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance))
+ Ints++;
+ if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance))
+ Ints++;
return std::make_pair(Ptrs, Ints);
}
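A rough standalone sketch (not the in-tree API; Model and memPtrSizeInBytes are made-up names) of the slot counts the function above produces, assuming 32-bit pointers and ints so every slot is 4 bytes:

  #include <cstdio>

  enum Model { Single, Multiple, Virtual, Unspecified };

  static unsigned memPtrSizeInBytes(bool IsFunction, Model M) {
    unsigned Ptrs = IsFunction ? 1 : 0;           // function pointer slot
    unsigned Ints = IsFunction ? 0 : 1;           // FieldOffset slot for data memptrs
    if (IsFunction && M != Single) ++Ints;        // NonVirtualBaseAdjustment
    if (M == Unspecified) ++Ints;                 // VBPtrOffset
    if (M == Virtual || M == Unspecified) ++Ints; // VirtualBaseAdjustmentOffset
    return (Ptrs + Ints) * 4;
  }

  int main() {
    std::printf("data, virtual inheritance:         %u\n", memPtrSizeInBytes(false, Virtual));     // 8
    std::printf("function, unspecified inheritance: %u\n", memPtrSizeInBytes(true, Unspecified));  // 16
  }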
@@ -185,14 +169,26 @@
assert(Target.getTriple().getArch() == llvm::Triple::x86 ||
Target.getTriple().getArch() == llvm::Triple::x86_64);
unsigned Ptrs, Ints;
- llvm::tie(Ptrs, Ints) = getMSMemberPointerSlots(MPT);
+ std::tie(Ptrs, Ints) = getMSMemberPointerSlots(MPT);
// The nominal struct is laid out with pointers followed by ints and aligned
// to a pointer width if any are present and an int width otherwise.
unsigned PtrSize = Target.getPointerWidth(0);
unsigned IntSize = Target.getIntWidth();
uint64_t Width = Ptrs * PtrSize + Ints * IntSize;
- unsigned Align = Ptrs > 0 ? Target.getPointerAlign(0) : Target.getIntAlign();
- Width = llvm::RoundUpToAlignment(Width, Align);
+ unsigned Align;
+
+ // When MSVC does x86_32 record layout, it aligns aggregate member pointers to
+ // 8 bytes. However, __alignof usually returns 4 for data memptrs and 8 for
+ // function memptrs.
+ if (Ptrs + Ints > 1 && Target.getTriple().getArch() == llvm::Triple::x86)
+ Align = 8 * 8;
+ else if (Ptrs)
+ Align = Target.getPointerAlign(0);
+ else
+ Align = Target.getIntAlign();
+
+ if (Target.getTriple().getArch() == llvm::Triple::x86_64)
+ Width = llvm::RoundUpToAlignment(Width, Align);
return std::make_pair(Width, Align);
}
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index 5256501..061fc13 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -14,17 +14,21 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
-#include "clang/AST/CharUnits.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/MathExtras.h"
using namespace clang;
@@ -71,10 +75,100 @@
return fn;
}
+static bool isLambda(const NamedDecl *ND) {
+ const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
+ if (!Record)
+ return false;
+
+ return Record->isLambda();
+}
+
+/// MicrosoftMangleContextImpl - Overrides the default MangleContext for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
+ typedef std::pair<const DeclContext *, IdentifierInfo *> DiscriminatorKeyTy;
+ llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
+ llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> LambdaIds;
+
+public:
+ MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags)
+ : MicrosoftMangleContext(Context, Diags) {}
+ bool shouldMangleCXXName(const NamedDecl *D) override;
+ bool shouldMangleStringLiteral(const StringLiteral *SL) override;
+ void mangleCXXName(const NamedDecl *D, raw_ostream &Out) override;
+ void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD, raw_ostream &) override;
+ void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
+ raw_ostream &) override;
+ void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ raw_ostream &) override;
+ void mangleCXXVFTable(const CXXRecordDecl *Derived,
+ ArrayRef<const CXXRecordDecl *> BasePath,
+ raw_ostream &Out) override;
+ void mangleCXXVBTable(const CXXRecordDecl *Derived,
+ ArrayRef<const CXXRecordDecl *> BasePath,
+ raw_ostream &Out) override;
+ void mangleCXXRTTI(QualType T, raw_ostream &) override;
+ void mangleCXXRTTIName(QualType T, raw_ostream &) override;
+ void mangleTypeName(QualType T, raw_ostream &) override;
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ raw_ostream &) override;
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ raw_ostream &) override;
+ void mangleReferenceTemporary(const VarDecl *, raw_ostream &) override;
+ void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) override;
+ void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
+ void mangleDynamicAtExitDestructor(const VarDecl *D,
+ raw_ostream &Out) override;
+ void mangleStringLiteral(const StringLiteral *SL, raw_ostream &Out) override;
+ bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
+ // Lambda closure types are already numbered.
+ if (isLambda(ND))
+ return false;
+
+ const DeclContext *DC = getEffectiveDeclContext(ND);
+ if (!DC->isFunctionOrMethod())
+ return false;
+
+ // Use the canonical number for externally visible decls.
+ if (ND->isExternallyVisible()) {
+ disc = getASTContext().getManglingNumber(ND);
+ return true;
+ }
+
+ // Anonymous tags are already numbered.
+ if (const TagDecl *Tag = dyn_cast<TagDecl>(ND)) {
+ if (Tag->getName().empty() && !Tag->getTypedefNameForAnonDecl())
+ return false;
+ }
+
+ // Make up a reasonable number for internal decls.
+ unsigned &discriminator = Uniquifier[ND];
+ if (!discriminator)
+ discriminator = ++Discriminator[std::make_pair(DC, ND->getIdentifier())];
+ disc = discriminator;
+ return true;
+ }
+
+ unsigned getLambdaId(const CXXRecordDecl *RD) {
+ assert(RD->isLambda() && "RD must be a lambda!");
+ assert(!RD->isExternallyVisible() && "RD must not be visible!");
+ assert(RD->getLambdaManglingNumber() == 0 &&
+ "RD must not have a mangling number!");
+ std::pair<llvm::DenseMap<const CXXRecordDecl *, unsigned>::iterator, bool>
+ Result = LambdaIds.insert(std::make_pair(RD, LambdaIds.size()));
+ return Result.first->second;
+ }
+
+private:
+ void mangleInitFiniStub(const VarDecl *D, raw_ostream &Out, char CharCode);
+};
+
/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
/// Microsoft Visual C++ ABI.
class MicrosoftCXXNameMangler {
- MangleContext &Context;
+ MicrosoftMangleContextImpl &Context;
raw_ostream &Out;
/// The "structor" is the top-level declaration being mangled, if
@@ -99,14 +193,14 @@
public:
enum QualifierMangleMode { QMM_Drop, QMM_Mangle, QMM_Escape, QMM_Result };
- MicrosoftCXXNameMangler(MangleContext &C, raw_ostream &Out_)
+ MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_)
: Context(C), Out(Out_),
Structor(0), StructorType(-1),
UseNameBackReferences(true),
PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
64) { }
- MicrosoftCXXNameMangler(MangleContext &C, raw_ostream &Out_,
+ MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_,
const CXXDestructorDecl *D, CXXDtorType Type)
: Context(C), Out(Out_),
Structor(getStructor(D)), StructorType(Type),
@@ -118,15 +212,20 @@
void mangle(const NamedDecl *D, StringRef Prefix = "\01?");
void mangleName(const NamedDecl *ND);
- void mangleDeclaration(const NamedDecl *ND);
void mangleFunctionEncoding(const FunctionDecl *FD);
void mangleVariableEncoding(const VarDecl *VD);
+ void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD);
+ void mangleMemberFunctionPointer(const CXXRecordDecl *RD,
+ const CXXMethodDecl *MD);
+ void mangleVirtualMemPtrThunk(
+ const CXXMethodDecl *MD,
+ const MicrosoftVTableContext::MethodVFTableLocation &ML);
void mangleNumber(int64_t Number);
void mangleType(QualType T, SourceRange Range,
QualifierMangleMode QMM = QMM_Mangle);
void mangleFunctionType(const FunctionType *T, const FunctionDecl *D = 0,
bool ForceInstMethod = false);
- void manglePostfix(const DeclContext *DC, bool NoFunction = false);
+ void mangleNestedName(const NamedDecl *ND);
private:
void disableBackReferences() { UseNameBackReferences = false; }
@@ -138,13 +237,13 @@
void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc);
void mangleCXXDtorType(CXXDtorType T);
void mangleQualifiers(Qualifiers Quals, bool IsMember);
- void manglePointerQualifiers(Qualifiers Quals);
+ void manglePointerCVQualifiers(Qualifiers Quals);
+ void manglePointerExtQualifiers(Qualifiers Quals, const Type *PointeeType);
void mangleUnscopedTemplateName(const TemplateDecl *ND);
void mangleTemplateInstantiationName(const TemplateDecl *TD,
const TemplateArgumentList &TemplateArgs);
void mangleObjCMethodName(const ObjCMethodDecl *MD);
- void mangleLocalName(const FunctionDecl *FD);
void mangleArgumentType(QualType T, SourceRange Range);
@@ -157,7 +256,7 @@
#undef ABSTRACT_TYPE
#undef NON_CANONICAL_TYPE
#undef TYPE
-
+
void mangleType(const TagDecl *TD);
void mangleDecayedArrayType(const ArrayType *T);
void mangleArrayType(const ArrayType *T);
@@ -171,47 +270,6 @@
const TemplateArgumentList &TemplateArgs);
void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA);
};
-
-/// MicrosoftMangleContextImpl - Overrides the default MangleContext for the
-/// Microsoft Visual C++ ABI.
-class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
-public:
- MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags)
- : MicrosoftMangleContext(Context, Diags) {}
- virtual bool shouldMangleCXXName(const NamedDecl *D);
- virtual void mangleCXXName(const NamedDecl *D, raw_ostream &Out);
- virtual void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
- uint64_t OffsetInVFTable,
- raw_ostream &);
- virtual void mangleThunk(const CXXMethodDecl *MD,
- const ThunkInfo &Thunk,
- raw_ostream &);
- virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &ThisAdjustment,
- raw_ostream &);
- virtual void mangleCXXVFTable(const CXXRecordDecl *Derived,
- ArrayRef<const CXXRecordDecl *> BasePath,
- raw_ostream &Out);
- virtual void mangleCXXVBTable(const CXXRecordDecl *Derived,
- ArrayRef<const CXXRecordDecl *> BasePath,
- raw_ostream &Out);
- virtual void mangleCXXRTTI(QualType T, raw_ostream &);
- virtual void mangleCXXRTTIName(QualType T, raw_ostream &);
- virtual void mangleTypeName(QualType T, raw_ostream &);
- virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- raw_ostream &);
- virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- raw_ostream &);
- virtual void mangleReferenceTemporary(const VarDecl *, raw_ostream &);
- virtual void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out);
- virtual void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out);
- virtual void mangleDynamicAtExitDestructor(const VarDecl *D,
- raw_ostream &Out);
-
-private:
- void mangleInitFiniStub(const VarDecl *D, raw_ostream &Out, char CharCode);
-};
-
}
bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
@@ -267,6 +325,13 @@
return true;
}
+bool
+MicrosoftMangleContextImpl::shouldMangleStringLiteral(const StringLiteral *SL) {
+ return SL->isAscii() || SL->isWide();
+ // TODO: This needs to be updated when MSVC gains support for Unicode
+ // literals.
+}
+
void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
StringRef Prefix) {
// MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
@@ -324,7 +389,7 @@
// ::= 2 # public static member
// ::= 3 # global
// ::= 4 # static local
-
+
// The first character in the encoding (after the name) is the storage class.
if (VD->isStaticDataMember()) {
// If it's a static member, it also encodes the access level.
@@ -345,12 +410,12 @@
// Pointers and references are odd. The type of 'int * const foo;' gets
// mangled as 'QAHA' instead of 'PAHB', for example.
TypeLoc TL = VD->getTypeSourceInfo()->getTypeLoc();
- QualType Ty = TL.getType();
+ QualType Ty = VD->getType();
if (Ty->isPointerType() || Ty->isReferenceType() ||
Ty->isMemberPointerType()) {
mangleType(Ty, TL.getSourceRange(), QMM_Drop);
- if (PointersAre64Bit)
- Out << 'E';
+ manglePointerExtQualifiers(
+ Ty.getDesugaredType(getASTContext()).getLocalQualifiers(), 0);
if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>()) {
mangleQualifiers(MPT->getPointeeType().getQualifiers(), true);
// Member pointers are suffixed with a back reference to the member
@@ -371,20 +436,130 @@
}
}
+void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
+ const ValueDecl *VD) {
+ // <member-data-pointer> ::= <integer-literal>
+ // ::= $F <number> <number>
+ // ::= $G <number> <number> <number>
+
+ int64_t FieldOffset;
+ int64_t VBTableOffset;
+ MSInheritanceAttr::Spelling IM = RD->getMSInheritanceModel();
+ if (VD) {
+ FieldOffset = getASTContext().getFieldOffset(VD);
+ assert(FieldOffset % getASTContext().getCharWidth() == 0 &&
+ "cannot take address of bitfield");
+ FieldOffset /= getASTContext().getCharWidth();
+
+ VBTableOffset = 0;
+ } else {
+ FieldOffset = RD->nullFieldOffsetIsZero() ? 0 : -1;
+
+ VBTableOffset = -1;
+ }
+
+ char Code = '\0';
+ switch (IM) {
+ case MSInheritanceAttr::Keyword_single_inheritance: Code = '0'; break;
+ case MSInheritanceAttr::Keyword_multiple_inheritance: Code = '0'; break;
+ case MSInheritanceAttr::Keyword_virtual_inheritance: Code = 'F'; break;
+ case MSInheritanceAttr::Keyword_unspecified_inheritance: Code = 'G'; break;
+ }
+
+ Out << '$' << Code;
+
+ mangleNumber(FieldOffset);
+
+ if (MSInheritanceAttr::hasVBPtrOffsetField(IM))
+ mangleNumber(0);
+ if (MSInheritanceAttr::hasVBTableOffsetField(IM))
+ mangleNumber(VBTableOffset);
+}
+
+void
+MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
+ const CXXMethodDecl *MD) {
+ // <member-function-pointer> ::= $1? <name>
+ // ::= $H? <name> <number>
+ // ::= $I? <name> <number> <number>
+ // ::= $J? <name> <number> <number> <number>
+ // ::= $0A@
+
+ MSInheritanceAttr::Spelling IM = RD->getMSInheritanceModel();
+
+ // The null member function pointer is $0A@ in function templates and crashes
+ // MSVC when used in class templates, so we don't know what they really look
+ // like.
+ if (!MD) {
+ Out << "$0A@";
+ return;
+ }
+
+ char Code = '\0';
+ switch (IM) {
+ case MSInheritanceAttr::Keyword_single_inheritance: Code = '1'; break;
+ case MSInheritanceAttr::Keyword_multiple_inheritance: Code = 'H'; break;
+ case MSInheritanceAttr::Keyword_virtual_inheritance: Code = 'I'; break;
+ case MSInheritanceAttr::Keyword_unspecified_inheritance: Code = 'J'; break;
+ }
+
+ Out << '$' << Code << '?';
+
+ // If non-virtual, mangle the name. If virtual, mangle as a virtual memptr
+ // thunk.
+ uint64_t NVOffset = 0;
+ uint64_t VBTableOffset = 0;
+ if (MD->isVirtual()) {
+ MicrosoftVTableContext *VTContext =
+ cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
+ const MicrosoftVTableContext::MethodVFTableLocation &ML =
+ VTContext->getMethodVFTableLocation(GlobalDecl(MD));
+ mangleVirtualMemPtrThunk(MD, ML);
+ NVOffset = ML.VFPtrOffset.getQuantity();
+ VBTableOffset = ML.VBTableIndex * 4;
+ if (ML.VBase) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot mangle pointers to member functions from virtual bases");
+ Diags.Report(MD->getLocation(), DiagID);
+ }
+ } else {
+ mangleName(MD);
+ mangleFunctionEncoding(MD);
+ }
+
+ if (MSInheritanceAttr::hasNVOffsetField(/*IsMemberFunction=*/true, IM))
+ mangleNumber(NVOffset);
+ if (MSInheritanceAttr::hasVBPtrOffsetField(IM))
+ mangleNumber(0);
+ if (MSInheritanceAttr::hasVBTableOffsetField(IM))
+ mangleNumber(VBTableOffset);
+}
+
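The two helpers above are reached when a member pointer appears as a non-type template argument; hypothetical user code (not from the patch) that exercises them:

  struct S { int x; void f(); };
  template <int S::*P> struct DataMemPtr {};
  template <void (S::*P)()> struct FnMemPtr {};

  DataMemPtr<&S::x> a;   // mangled via mangleMemberDataPointer
  FnMemPtr<&S::f>   b;   // mangled via mangleMemberFunctionPointer
  FnMemPtr<nullptr> c;   // null member pointer, handled by the NullPtr template-argument case later in this file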
+void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk(
+ const CXXMethodDecl *MD,
+ const MicrosoftVTableContext::MethodVFTableLocation &ML) {
+ // Get the vftable offset.
+ CharUnits PointerWidth = getASTContext().toCharUnitsFromBits(
+ getASTContext().getTargetInfo().getPointerWidth(0));
+ uint64_t OffsetInVFTable = ML.Index * PointerWidth.getQuantity();
+
+ Out << "?_9";
+ mangleName(MD->getParent());
+ Out << "$B";
+ mangleNumber(OffsetInVFTable);
+ Out << 'A';
+ Out << (PointersAre64Bit ? 'A' : 'E');
+}
+
void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
// <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
- const DeclContext *DC = ND->getDeclContext();
// Always start with the unqualified name.
- mangleUnqualifiedName(ND);
+ mangleUnqualifiedName(ND);
- // If this is an extern variable declared locally, the relevant DeclContext
- // is that of the containing namespace, or the translation unit.
- if (isa<FunctionDecl>(DC) && ND->hasLinkage())
- while (!DC->isNamespace() && !DC->isTranslationUnit())
- DC = DC->getParent();
-
- manglePostfix(DC);
+ mangleNestedName(ND);
// Terminate the whole name with an '@'.
Out << '@';
@@ -438,6 +613,13 @@
return Spec->getSpecializedTemplate();
}
+ // Check if we have a variable template.
+ if (const VarTemplateSpecializationDecl *Spec =
+ dyn_cast<VarTemplateSpecializationDecl>(ND)) {
+ TemplateArgs = &Spec->getTemplateArgs();
+ return Spec->getSpecializedTemplate();
+ }
+
return 0;
}
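Illustration (user code, not from the patch) of the VarTemplateSpecializationDecl case added above: referencing pi<float> creates a variable template specialization, which is now mangled as a template instantiation rather than a plain variable.

  template <typename T> T pi = T(3.14159265358979L);
  float circumference(float r) { return 2.0f * pi<float> * r; }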
@@ -461,7 +643,6 @@
return;
}
- // We have a class template.
// Here comes the tricky thing: if we need to mangle something like
// void foo(A::X<Y>, B::X<Y>),
// the X<Y> part is aliased. However, if you need to mangle
@@ -506,17 +687,31 @@
mangleSourceName(II->getName());
break;
}
-
+
// Otherwise, an anonymous entity. We must have a declaration.
assert(ND && "mangling empty name without declaration");
-
+
if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
if (NS->isAnonymousNamespace()) {
Out << "?A@";
break;
}
}
-
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ // We must have an anonymous union or struct declaration.
+ const CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl();
+ assert(RD && "expected variable decl to have a record type");
+ // Anonymous types with no tag or typedef get the name of their
+ // declarator mangled in. If they have no declarator, number them with
+ // a $S prefix.
+ llvm::SmallString<64> Name("$S");
+ // Get a unique id for the anonymous struct.
+ Name += llvm::utostr(Context.getAnonymousStructId(RD) + 1);
+ mangleSourceName(Name.str());
+ break;
+ }
+
// We must have an anonymous struct.
const TagDecl *TD = cast<TagDecl>(ND);
if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
@@ -528,26 +723,43 @@
break;
}
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
+ if (Record->isLambda()) {
+ llvm::SmallString<10> Name("<lambda_");
+ unsigned LambdaId;
+ if (Record->getLambdaManglingNumber())
+ LambdaId = Record->getLambdaManglingNumber();
+ else
+ LambdaId = Context.getLambdaId(Record);
+
+ Name += llvm::utostr(LambdaId);
+ Name += ">";
+
+ mangleSourceName(Name);
+ break;
+ }
+ }
+
+ llvm::SmallString<64> Name("<unnamed-type-");
if (TD->hasDeclaratorForAnonDecl()) {
// Anonymous types with no tag or typedef get the name of their
- // declarator mangled in.
- llvm::SmallString<64> Name("<unnamed-type-");
+ // declarator mangled in if they have one.
Name += TD->getDeclaratorForAnonDecl()->getName();
- Name += ">";
- mangleSourceName(Name.str());
} else {
- // Anonymous types with no tag, no typedef, or declarator get
- // '<unnamed-tag>'.
- mangleSourceName("<unnamed-tag>");
+ // Otherwise, number the types using a $S prefix.
+ Name += "$S";
+ Name += llvm::utostr(Context.getAnonymousStructId(TD));
}
+ Name += ">";
+ mangleSourceName(Name.str());
break;
}
-
+
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
llvm_unreachable("Can't mangle Objective-C selector names here!");
-
+
case DeclarationName::CXXConstructorName:
if (ND == Structor) {
assert(StructorType == Ctor_Complete &&
@@ -555,7 +767,7 @@
}
Out << "?0";
break;
-
+
case DeclarationName::CXXDestructorName:
if (ND == Structor)
// If the named decl is the C++ destructor we're mangling,
@@ -566,17 +778,17 @@
// class with a destructor is declared within a destructor.
mangleCXXDtorType(Dtor_Base);
break;
-
+
case DeclarationName::CXXConversionFunctionName:
// <operator-name> ::= ?B # (cast)
// The target type is encoded as the return type.
Out << "?B";
break;
-
+
case DeclarationName::CXXOperatorName:
mangleOperatorName(Name.getCXXOverloadedOperator(), ND->getLocation());
break;
-
+
case DeclarationName::CXXLiteralOperatorName: {
// FIXME: Was this added in VS2010? Does MS even know how to mangle this?
DiagnosticsEngine Diags = Context.getDiags();
@@ -585,51 +797,53 @@
Diags.Report(ND->getLocation(), DiagID);
break;
}
-
+
case DeclarationName::CXXUsingDirective:
llvm_unreachable("Can't mangle a using directive name!");
}
}
-void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
- bool NoFunction) {
+void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
// <postfix> ::= <unqualified-name> [<postfix>]
// ::= <substitution> [<postfix>]
+ if (isLambda(ND))
+ return;
- if (!DC) return;
+ const DeclContext *DC = ND->getDeclContext();
- while (isa<LinkageSpecDecl>(DC))
+ while (!DC->isTranslationUnit()) {
+ if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) {
+ unsigned Disc;
+ if (Context.getNextDiscriminator(ND, Disc)) {
+ Out << '?';
+ mangleNumber(Disc);
+ Out << '?';
+ }
+ }
+
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ DiagnosticsEngine Diags = Context.getDiags();
+ unsigned DiagID =
+ Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle a local inside this block yet");
+ Diags.Report(BD->getLocation(), DiagID);
+
+ // FIXME: This is completely, utterly, wrong; see ItaniumMangle
+ // for how this should be done.
+ Out << "__block_invoke" << Context.getBlockId(BD, false);
+ Out << '@';
+ continue;
+ } else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(Method);
+ } else if (isa<NamedDecl>(DC)) {
+ ND = cast<NamedDecl>(DC);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ mangle(FD, "?");
+ break;
+ } else
+ mangleUnqualifiedName(ND);
+ }
DC = DC->getParent();
-
- if (DC->isTranslationUnit())
- return;
-
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
- DiagnosticsEngine Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle a local inside this block yet");
- Diags.Report(BD->getLocation(), DiagID);
-
- // FIXME: This is completely, utterly, wrong; see ItaniumMangle
- // for how this should be done.
- Out << "__block_invoke" << Context.getBlockId(BD, false);
- Out << '@';
- return manglePostfix(DC->getParent(), NoFunction);
- } else if (isa<CapturedDecl>(DC)) {
- // Skip CapturedDecl context.
- manglePostfix(DC->getParent(), NoFunction);
- return;
- }
-
- if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
- return;
- else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
- mangleObjCMethodName(Method);
- else if (const FunctionDecl *Func = dyn_cast<FunctionDecl>(DC))
- mangleLocalName(Func);
- else {
- mangleUnqualifiedName(cast<NamedDecl>(DC));
- manglePostfix(DC->getParent(), NoFunction);
}
}
@@ -768,7 +982,7 @@
case OO_Array_New: Out << "?_U"; break;
// <operator-name> ::= ?_V # delete[]
case OO_Array_Delete: Out << "?_V"; break;
-
+
case OO_Conditional: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
@@ -776,7 +990,7 @@
Diags.Report(Loc, DiagID);
break;
}
-
+
case OO_None:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Not an overloaded operator");
@@ -803,44 +1017,6 @@
Context.mangleObjCMethodName(MD, Out);
}
-// Find out how many function decls live above this one and return an integer
-// suitable for use as the number in a numbered anonymous scope.
-// TODO: Memoize.
-static unsigned getLocalNestingLevel(const FunctionDecl *FD) {
- const DeclContext *DC = FD->getParent();
- int level = 1;
-
- while (DC && !DC->isTranslationUnit()) {
- if (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)) level++;
- DC = DC->getParent();
- }
-
- return 2*level;
-}
-
-void MicrosoftCXXNameMangler::mangleLocalName(const FunctionDecl *FD) {
- // <nested-name> ::= <numbered-anonymous-scope> ? <mangled-name>
- // <numbered-anonymous-scope> ::= ? <number>
- // Even though the name is rendered in reverse order (e.g.
- // A::B::C is rendered as C@B@A), VC numbers the scopes from outermost to
- // innermost. So a method bar in class C local to function foo gets mangled
- // as something like:
- // ?bar@C@?1??foo@@YAXXZ@QAEXXZ
- // This is more apparent when you have a type nested inside a method of a
- // type nested inside a function. A method baz in class D local to method
- // bar of class C local to function foo gets mangled as:
- // ?baz@D@?3??bar@C@?1??foo@@YAXXZ@QAEXXZ@QAEXXZ
- // This scheme is general enough to support GCC-style nested
- // functions. You could have a method baz of class C inside a function bar
- // inside a function foo, like so:
- // ?baz@C@?3??bar@?1??foo@@YAXXZ@YAXXZ@QAEXXZ
- unsigned NestLevel = getLocalNestingLevel(FD);
- Out << '?';
- mangleNumber(NestLevel);
- Out << '?';
- mangle(FD, "?");
-}
-
void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
const TemplateDecl *TD,
const TemplateArgumentList &TemplateArgs) {
@@ -925,20 +1101,25 @@
<< E->getStmtClassName() << E->getSourceRange();
}
-void
-MicrosoftCXXNameMangler::mangleTemplateArgs(const TemplateDecl *TD,
- const TemplateArgumentList &TemplateArgs) {
- // <template-args> ::= {<type> | <integer-literal>}+ @
- unsigned NumTemplateArgs = TemplateArgs.size();
- for (unsigned i = 0; i < NumTemplateArgs; ++i) {
- const TemplateArgument &TA = TemplateArgs[i];
+void MicrosoftCXXNameMangler::mangleTemplateArgs(
+ const TemplateDecl *TD, const TemplateArgumentList &TemplateArgs) {
+ // <template-args> ::= <template-arg>+ @
+ for (const TemplateArgument &TA : TemplateArgs.asArray())
mangleTemplateArg(TD, TA);
- }
Out << '@';
}
void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
const TemplateArgument &TA) {
+ // <template-arg> ::= <type>
+ // ::= <integer-literal>
+ // ::= <member-data-pointer>
+ // ::= <member-function-pointer>
+ // ::= $E? <name> <type-encoding>
+ // ::= $1? <name> <type-encoding>
+ // ::= $0A@
+ // ::= <template-args>
+
switch (TA.getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Can't mangle null template arguments!");
@@ -951,16 +1132,38 @@
}
case TemplateArgument::Declaration: {
const NamedDecl *ND = cast<NamedDecl>(TA.getAsDecl());
- mangle(ND, TA.isDeclForReferenceParam() ? "$E?" : "$1?");
+ if (isa<FieldDecl>(ND) || isa<IndirectFieldDecl>(ND)) {
+ mangleMemberDataPointer(
+ cast<CXXRecordDecl>(ND->getDeclContext())->getMostRecentDecl(),
+ cast<ValueDecl>(ND));
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD && MD->isInstance())
+ mangleMemberFunctionPointer(MD->getParent()->getMostRecentDecl(), MD);
+ else
+ mangle(FD, "$1?");
+ } else {
+ mangle(ND, TA.isDeclForReferenceParam() ? "$E?" : "$1?");
+ }
break;
}
case TemplateArgument::Integral:
mangleIntegerLiteral(TA.getAsIntegral(),
TA.getIntegralType()->isBooleanType());
break;
- case TemplateArgument::NullPtr:
- Out << "$0A@";
+ case TemplateArgument::NullPtr: {
+ QualType T = TA.getNullPtrType();
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
+ const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
+ if (MPT->isMemberFunctionPointerType())
+ mangleMemberFunctionPointer(RD, 0);
+ else
+ mangleMemberDataPointer(RD, 0);
+ } else {
+ Out << "$0A@";
+ }
break;
+ }
case TemplateArgument::Expression:
mangleExpression(TA.getAsExpr());
break;
@@ -1059,13 +1262,25 @@
// FIXME: For now, just drop all extension qualifiers on the floor.
}
-void MicrosoftCXXNameMangler::manglePointerQualifiers(Qualifiers Quals) {
- // <pointer-cvr-qualifiers> ::= P # no qualifiers
- // ::= Q # const
- // ::= R # volatile
- // ::= S # const volatile
+void
+MicrosoftCXXNameMangler::manglePointerExtQualifiers(Qualifiers Quals,
+ const Type *PointeeType) {
+ bool HasRestrict = Quals.hasRestrict();
+ if (PointersAre64Bit && (!PointeeType || !PointeeType->isFunctionType()))
+ Out << 'E';
+
+ if (HasRestrict)
+ Out << 'I';
+}
+
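User-level declarations (illustration only) for the extended qualifiers the helper above encodes:

  void f(int *__restrict p);   // a __restrict-qualified pointer adds 'I'
  void g(int *p);              // on a 64-bit target a non-function pointee also gets 'E'
  void h(void (*fp)());        // pointers to functions never get the 'E' marker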
+void MicrosoftCXXNameMangler::manglePointerCVQualifiers(Qualifiers Quals) {
+ // <pointer-cv-qualifiers> ::= P # no qualifiers
+ // ::= Q # const
+ // ::= R # volatile
+ // ::= S # const volatile
bool HasConst = Quals.hasConst(),
HasVolatile = Quals.hasVolatile();
+
if (HasConst && HasVolatile) {
Out << 'S';
} else if (HasVolatile) {
@@ -1165,8 +1380,10 @@
}
// We have to mangle these now, while we still have enough information.
- if (IsPointer)
- manglePointerQualifiers(Quals);
+ if (IsPointer) {
+ manglePointerCVQualifiers(Quals);
+ manglePointerExtQualifiers(Quals, T->getPointeeType().getTypePtr());
+ }
const Type *ty = T.getTypePtr();
switch (ty->getTypeClass()) {
@@ -1254,7 +1471,7 @@
case BuiltinType::OCLImage3d: Out << "PAUocl_image3d@@"; break;
case BuiltinType::OCLSampler: Out << "PAUocl_sampler@@"; break;
case BuiltinType::OCLEvent: Out << "PAUocl_event@@"; break;
-
+
case BuiltinType::NullPtr: Out << "$$T"; break;
case BuiltinType::Char16:
@@ -1306,9 +1523,9 @@
// If this is a C++ instance method, mangle the CVR qualifiers for the
// this pointer.
if (IsInstMethod) {
- if (PointersAre64Bit)
- Out << 'E';
- mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false);
+ Qualifiers Quals = Qualifiers::fromCVRMask(Proto->getTypeQuals());
+ manglePointerExtQualifiers(Quals, 0);
+ mangleQualifiers(Quals, false);
}
mangleCallingConvention(T);
@@ -1327,23 +1544,30 @@
}
Out << '@';
} else {
- QualType ResultType = Proto->getResultType();
- if (ResultType->isVoidType())
- ResultType = ResultType.getUnqualifiedType();
- mangleType(ResultType, Range, QMM_Result);
+ QualType ResultType = Proto->getReturnType();
+ if (const auto *AT =
+ dyn_cast_or_null<AutoType>(ResultType->getContainedAutoType())) {
+ Out << '?';
+ mangleQualifiers(ResultType.getLocalQualifiers(), /*IsMember=*/false);
+ Out << '?';
+ mangleSourceName(AT->isDecltypeAuto() ? "<decltype-auto>" : "<auto>");
+ Out << '@';
+ } else {
+ if (ResultType->isVoidType())
+ ResultType = ResultType.getUnqualifiedType();
+ mangleType(ResultType, Range, QMM_Result);
+ }
}
// <argument-list> ::= X # void
// ::= <type>+ @
// ::= <type>* Z # varargs
- if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
Out << 'X';
} else {
// Happens for function pointer type arguments for example.
- for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
- ArgEnd = Proto->arg_type_end();
- Arg != ArgEnd; ++Arg)
- mangleArgumentType(*Arg, Range);
+ for (const auto &Arg : Proto->param_types())
+ mangleArgumentType(Arg, Range);
// <builtin-type> ::= Z # ellipsis
if (Proto->isVariadic())
Out << 'Z';
@@ -1465,7 +1689,7 @@
// <union-type> ::= T <name>
// <struct-type> ::= U <name>
// <class-type> ::= V <name>
-// <enum-type> ::= W <size> <name>
+// <enum-type> ::= W4 <name>
void MicrosoftCXXNameMangler::mangleType(const EnumType *T, SourceRange) {
mangleType(cast<TagType>(T)->getDecl());
}
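Illustration (user code, not from the patch) of the <enum-type> change: both enums below now encode as "W4<name>@@" in type positions, where the removed code emitted 'W' followed by the underlying type's size in bytes (W1 and W8 here).

  enum Small : char      { A };
  enum Large : long long { B };
  void f(Small);   // parameter type now mangled as W4Small@@
  void g(Large);   // parameter type now mangled as W4Large@@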
@@ -1485,9 +1709,7 @@
Out << 'V';
break;
case TTK_Enum:
- Out << 'W';
- Out << getASTContext().getTypeSizeInChars(
- cast<EnumDecl>(TD)->getIntegerType()).getQuantity();
+ Out << "W4";
break;
}
mangleName(TD);
@@ -1503,7 +1725,7 @@
void MicrosoftCXXNameMangler::mangleDecayedArrayType(const ArrayType *T) {
// This isn't a recursive mangling, so now we have to do it all in this
// one call.
- manglePointerQualifiers(T->getElementType().getQualifiers());
+ manglePointerCVQualifiers(T->getElementType().getQualifiers());
mangleType(T->getElementType(), SourceRange());
}
void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T,
@@ -1575,8 +1797,6 @@
mangleName(T->getClass()->castAs<RecordType>()->getDecl());
mangleFunctionType(FPT, 0, true);
} else {
- if (PointersAre64Bit && !T->getPointeeType()->isFunctionType())
- Out << 'E';
mangleQualifiers(PointeeType.getQualifiers(), true);
mangleName(T->getClass()->castAs<RecordType>()->getDecl());
mangleType(PointeeType, Range, QMM_Drop);
@@ -1604,42 +1824,37 @@
// <type> ::= <pointer-type>
// <pointer-type> ::= E? <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
-// # the E is required for 64-bit non static pointers
+// # the E is required for 64-bit non-static pointers
void MicrosoftCXXNameMangler::mangleType(const PointerType *T,
SourceRange Range) {
QualType PointeeTy = T->getPointeeType();
- if (PointersAre64Bit && !T->getPointeeType()->isFunctionType())
- Out << 'E';
mangleType(PointeeTy, Range);
}
void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T,
SourceRange Range) {
// Object pointers never have qualifiers.
Out << 'A';
- if (PointersAre64Bit && !T->getPointeeType()->isFunctionType())
- Out << 'E';
+ manglePointerExtQualifiers(Qualifiers(), T->getPointeeType().getTypePtr());
mangleType(T->getPointeeType(), Range);
}
// <type> ::= <reference-type>
// <reference-type> ::= A E? <cvr-qualifiers> <type>
-// # the E is required for 64-bit non static lvalue references
+// # the E is required for 64-bit non-static lvalue references
void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T,
SourceRange Range) {
Out << 'A';
- if (PointersAre64Bit && !T->getPointeeType()->isFunctionType())
- Out << 'E';
+ manglePointerExtQualifiers(Qualifiers(), T->getPointeeType().getTypePtr());
mangleType(T->getPointeeType(), Range);
}
// <type> ::= <r-value-reference-type>
// <r-value-reference-type> ::= $$Q E? <cvr-qualifiers> <type>
-// # the E is required for 64-bit non static rvalue references
+// # the E is required for 64-bit non-static rvalue references
void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T,
SourceRange Range) {
Out << "$$Q";
- if (PointersAre64Bit && !T->getPointeeType()->isFunctionType())
- Out << 'E';
+ manglePointerExtQualifiers(Qualifiers(), T->getPointeeType().getTypePtr());
mangleType(T->getPointeeType(), Range);
}
@@ -1804,6 +2019,8 @@
}
void MicrosoftCXXNameMangler::mangleType(const AutoType *T, SourceRange Range) {
+ assert(T->getDeducedType().isNull() && "expecting a dependent type!");
+
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot mangle this 'auto' type yet");
@@ -1923,17 +2140,17 @@
}
}
-void MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk(
- const CXXMethodDecl *MD, uint64_t OffsetInVFTable, raw_ostream &Out) {
- bool Is64Bit = getASTContext().getTargetInfo().getPointerWidth(0) == 64;
+void
+MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
+ raw_ostream &Out) {
+ MicrosoftVTableContext *VTContext =
+ cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
+ const MicrosoftVTableContext::MethodVFTableLocation &ML =
+ VTContext->getMethodVFTableLocation(GlobalDecl(MD));
MicrosoftCXXNameMangler Mangler(*this, Out);
- Mangler.getStream() << "\01??_9";
- Mangler.mangleName(MD->getParent());
- Mangler.getStream() << "$B";
- Mangler.mangleNumber(OffsetInVFTable);
- Mangler.getStream() << "A";
- Mangler.getStream() << (Is64Bit ? "A" : "E");
+ Mangler.getStream() << "\01?";
+ Mangler.mangleVirtualMemPtrThunk(MD, ML);
}
void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
@@ -2050,7 +2267,16 @@
void MicrosoftMangleContextImpl::mangleStaticGuardVariable(const VarDecl *VD,
raw_ostream &Out) {
- // <guard-name> ::= ?_B <postfix> @51
+ // TODO: This is not correct, especially with respect to MSVC2013. MSVC2013
+  // utilizes thread-local variables to implement thread-safe, re-entrant
+  // initialization for statics. They no longer differentiate between an
+  // externally visible and a non-externally visible static with respect to
+  // mangling; they all get $TSS <number>.
+ //
+ // N.B. This means that they can get more than 32 static variable guards in a
+ // scope. It also means that they broke compatibility with their own ABI.
+
+ // <guard-name> ::= ?_B <postfix> @5 <scope-depth>
// ::= ?$S <guard-num> @ <postfix> @4IA
// The first mangling is what MSVC uses to guard static locals in inline
@@ -2064,8 +2290,17 @@
bool Visible = VD->isExternallyVisible();
// <operator-name> ::= ?_B # local static guard
Mangler.getStream() << (Visible ? "\01??_B" : "\01?$S1@");
- Mangler.manglePostfix(VD->getDeclContext());
- Mangler.getStream() << (Visible ? "@51" : "@4IA");
+ unsigned ScopeDepth = 0;
+ if (Visible && !getNextDiscriminator(VD, ScopeDepth))
+ // If we do not have a discriminator and are emitting a guard variable for
+ // use at global scope, then mangling the nested name will not be enough to
+ // remove ambiguities.
+ Mangler.mangle(VD, "");
+ else
+ Mangler.mangleNestedName(VD);
+ Mangler.getStream() << (Visible ? "@5" : "@4IA");
+ if (ScopeDepth)
+ Mangler.mangleNumber(ScopeDepth);
}
void MicrosoftMangleContextImpl::mangleInitFiniStub(const VarDecl *D,
@@ -2074,6 +2309,10 @@
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "\01??__" << CharCode;
Mangler.mangleName(D);
+ if (D->isStaticDataMember()) {
+ Mangler.mangleVariableEncoding(D);
+ Mangler.getStream() << '@';
+ }
// This is the function class mangling. These stubs are global, non-variadic,
// cdecl functions that return void and take no args.
Mangler.getStream() << "YAXXZ";
@@ -2092,6 +2331,172 @@
mangleInitFiniStub(D, Out, 'F');
}
+void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
+ raw_ostream &Out) {
+ // <char-type> ::= 0 # char
+ // ::= 1 # wchar_t
+ // ::= ??? # char16_t/char32_t will need a mangling too...
+ //
+ // <literal-length> ::= <non-negative integer> # the length of the literal
+ //
+ // <encoded-crc> ::= <hex digit>+ @ # crc of the literal including
+ // # null-terminator
+ //
+ // <encoded-string> ::= <simple character> # uninteresting character
+ // ::= '?$' <hex digit> <hex digit> # these two nibbles
+ // # encode the byte for the
+ // # character
+ // ::= '?' [a-z] # \xe1 - \xfa
+ // ::= '?' [A-Z] # \xc1 - \xda
+ // ::= '?' [0-9] # [,/\:. \n\t'-]
+ //
+ // <literal> ::= '??_C@_' <char-type> <literal-length> <encoded-crc>
+ // <encoded-string> '@'
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "\01??_C@_";
+
+ // <char-type>: The "kind" of string literal is encoded into the mangled name.
+ // TODO: This needs to be updated when MSVC gains support for unicode
+ // literals.
+ if (SL->isAscii())
+ Mangler.getStream() << '0';
+ else if (SL->isWide())
+ Mangler.getStream() << '1';
+ else
+ llvm_unreachable("unexpected string literal kind!");
+
+ // <literal-length>: The next part of the mangled name consists of the length
+ // of the string.
+ // The StringLiteral does not consider the NUL terminator byte(s) but the
+ // mangling does.
+ // N.B. The length is in terms of bytes, not characters.
+ Mangler.mangleNumber(SL->getByteLength() + SL->getCharByteWidth());
+
+ // We will use the "Rocksoft^tm Model CRC Algorithm" to describe the
+ // properties of our CRC:
+ // Width : 32
+ // Poly : 04C11DB7
+ // Init : FFFFFFFF
+ // RefIn : True
+ // RefOut : True
+ // XorOut : 00000000
+ // Check : 340BC6D9
+ uint32_t CRC = 0xFFFFFFFFU;
+
+ auto UpdateCRC = [&CRC](char Byte) {
+ for (unsigned i = 0; i < 8; ++i) {
+ bool Bit = CRC & 0x80000000U;
+ if (Byte & (1U << i))
+ Bit = !Bit;
+ CRC <<= 1;
+ if (Bit)
+ CRC ^= 0x04C11DB7U;
+ }
+ };
+
+ auto GetLittleEndianByte = [&Mangler, &SL](unsigned Index) {
+ unsigned CharByteWidth = SL->getCharByteWidth();
+ uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
+ unsigned OffsetInCodeUnit = Index % CharByteWidth;
+ return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
+ };
+
+ auto GetBigEndianByte = [&Mangler, &SL](unsigned Index) {
+ unsigned CharByteWidth = SL->getCharByteWidth();
+ uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
+ unsigned OffsetInCodeUnit = (CharByteWidth - 1) - (Index % CharByteWidth);
+ return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
+ };
+
+ // CRC all the bytes of the StringLiteral.
+ for (unsigned I = 0, E = SL->getByteLength(); I != E; ++I)
+ UpdateCRC(GetLittleEndianByte(I));
+
+  // The NUL terminator byte(s) were not present earlier, so we need to
+  // manually process those bytes into the CRC.
+ for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
+ ++NullTerminator)
+ UpdateCRC('\x00');
+
+ // The literature refers to the process of reversing the bits in the final CRC
+ // output as "reflection".
+ CRC = llvm::reverseBits(CRC);
+
+ // <encoded-crc>: The CRC is encoded utilizing the standard number mangling
+ // scheme.
+ Mangler.mangleNumber(CRC);
+
+ // <encoded-string>: The mangled name also contains the first 32 _characters_
+ // (including null-terminator bytes) of the StringLiteral.
+  // Each character is encoded by splitting it into bytes and then encoding
+  // the constituent bytes.
+ auto MangleByte = [&Mangler](char Byte) {
+ // There are five different manglings for characters:
+ // - [a-zA-Z0-9_$]: A one-to-one mapping.
+ // - ?[a-z]: The range from \xe1 to \xfa.
+ // - ?[A-Z]: The range from \xc1 to \xda.
+ // - ?[0-9]: The set of [,/\:. \n\t'-].
+ // - ?$XX: A fallback which maps nibbles.
+ if (isIdentifierBody(Byte, /*AllowDollar=*/true)) {
+ Mangler.getStream() << Byte;
+ } else if (isLetter(Byte & 0x7f)) {
+ Mangler.getStream() << '?' << static_cast<char>(Byte & 0x7f);
+ } else {
+ switch (Byte) {
+ case ',':
+ Mangler.getStream() << "?0";
+ break;
+ case '/':
+ Mangler.getStream() << "?1";
+ break;
+ case '\\':
+ Mangler.getStream() << "?2";
+ break;
+ case ':':
+ Mangler.getStream() << "?3";
+ break;
+ case '.':
+ Mangler.getStream() << "?4";
+ break;
+ case ' ':
+ Mangler.getStream() << "?5";
+ break;
+ case '\n':
+ Mangler.getStream() << "?6";
+ break;
+ case '\t':
+ Mangler.getStream() << "?7";
+ break;
+ case '\'':
+ Mangler.getStream() << "?8";
+ break;
+ case '-':
+ Mangler.getStream() << "?9";
+ break;
+ default:
+ Mangler.getStream() << "?$";
+ Mangler.getStream() << static_cast<char>('A' + ((Byte >> 4) & 0xf));
+ Mangler.getStream() << static_cast<char>('A' + (Byte & 0xf));
+ break;
+ }
+ }
+ };
+
+ // Enforce our 32 character max.
+ unsigned NumCharsToMangle = std::min(32U, SL->getLength());
+ for (unsigned I = 0, E = NumCharsToMangle * SL->getCharByteWidth(); I != E;
+ ++I)
+ MangleByte(GetBigEndianByte(I));
+
+ // Encode the NUL terminator if there is room.
+ if (NumCharsToMangle < 32)
+ for (unsigned NullTerminator = 0; NullTerminator < SL->getCharByteWidth();
+ ++NullTerminator)
+ MangleByte(0);
+
+ Mangler.getStream() << '@';
+}
+
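As a sanity check on the CRC parameters documented above (Poly 04C11DB7, Init FFFFFFFF, reflected input and output, no final xor), here is a minimal standalone sketch, separate from the patch, that uses the bit-reversed polynomial 0xEDB88320 so the register is already reflected and no final bit-reversal step is needed; the conventional check input "123456789" yields the documented check value 340BC6D9.

#include <cassert>
#include <cstdint>
#include <cstring>

// Same CRC variant as described above, implemented with the reflected
// (bit-reversed) polynomial so the register already holds the reflected value.
static uint32_t MangleCRC(const unsigned char *Data, size_t Len) {
  uint32_t CRC = 0xFFFFFFFFu;                // Init
  for (size_t I = 0; I != Len; ++I) {
    CRC ^= Data[I];                          // reflected input
    for (int Bit = 0; Bit < 8; ++Bit)
      CRC = (CRC >> 1) ^ (0xEDB88320u & (0u - (CRC & 1u)));
  }
  return CRC;                                // XorOut is 0, so no final xor
}

int main() {
  const char *Check = "123456789";
  assert(MangleCRC(reinterpret_cast<const unsigned char *>(Check),
                   std::strlen(Check)) == 0x340BC6D9u);
}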
MicrosoftMangleContext *
MicrosoftMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
return new MicrosoftMangleContextImpl(Context, Diags);
diff --git a/lib/AST/RawCommentList.cpp b/lib/AST/RawCommentList.cpp
index 1fa7cea..24b129a 100644
--- a/lib/AST/RawCommentList.cpp
+++ b/lib/AST/RawCommentList.cpp
@@ -95,10 +95,9 @@
unsigned BeginOffset;
unsigned EndOffset;
- llvm::tie(BeginFileID, BeginOffset) =
+ std::tie(BeginFileID, BeginOffset) =
SourceMgr.getDecomposedLoc(Range.getBegin());
- llvm::tie(EndFileID, EndOffset) =
- SourceMgr.getDecomposedLoc(Range.getEnd());
+ std::tie(EndFileID, EndOffset) = SourceMgr.getDecomposedLoc(Range.getEnd());
const unsigned Length = EndOffset - BeginOffset;
if (Length < 2)
@@ -252,3 +251,15 @@
Comments.push_back(new (Allocator) RawComment(RC));
}
}
+
+void RawCommentList::addDeserializedComments(ArrayRef<RawComment *> DeserializedComments) {
+ std::vector<RawComment *> MergedComments;
+ MergedComments.reserve(Comments.size() + DeserializedComments.size());
+
+ std::merge(Comments.begin(), Comments.end(),
+ DeserializedComments.begin(), DeserializedComments.end(),
+ std::back_inserter(MergedComments),
+ BeforeThanCompare<RawComment>(SourceMgr));
+ std::swap(Comments, MergedComments);
+}
+
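The merge above assumes both Comments and the deserialized list are already sorted by the same BeforeThanCompare ordering; std::merge then produces the combined sorted vector in a single linear pass. A small generic sketch of the same pattern, using plain ints rather than Clang types:

#include <algorithm>
#include <iterator>
#include <vector>

int main() {
  std::vector<int> Existing = {1, 4, 9};      // already sorted
  std::vector<int> Deserialized = {2, 4, 7};  // also sorted
  std::vector<int> Merged;
  Merged.reserve(Existing.size() + Deserialized.size());
  std::merge(Existing.begin(), Existing.end(), Deserialized.begin(),
             Deserialized.end(), std::back_inserter(Merged));
  // Merged is now {1, 2, 4, 4, 7, 9}; swapping it back in mirrors the
  // std::swap(Comments, MergedComments) call above.
  Existing.swap(Merged);
}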
diff --git a/lib/AST/RecordLayout.cpp b/lib/AST/RecordLayout.cpp
index 71e44ec..9d46cc6 100644
--- a/lib/AST/RecordLayout.cpp
+++ b/lib/AST/RecordLayout.cpp
@@ -29,10 +29,13 @@
}
ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
- CharUnits alignment, CharUnits datasize,
+ CharUnits alignment,
+ CharUnits requiredAlignment,
+ CharUnits datasize,
const uint64_t *fieldoffsets,
unsigned fieldcount)
- : Size(size), DataSize(datasize), Alignment(alignment), FieldOffsets(0),
+ : Size(size), DataSize(datasize), Alignment(alignment),
+ RequiredAlignment(requiredAlignment), FieldOffsets(0),
FieldCount(fieldcount), CXXInfo(0) {
if (FieldCount > 0) {
FieldOffsets = new (Ctx) uint64_t[FieldCount];
@@ -43,21 +46,24 @@
// Constructor for C++ records.
ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
CharUnits size, CharUnits alignment,
+ CharUnits requiredAlignment,
bool hasOwnVFPtr, bool hasExtendableVFPtr,
CharUnits vbptroffset,
CharUnits datasize,
const uint64_t *fieldoffsets,
unsigned fieldcount,
CharUnits nonvirtualsize,
- CharUnits nonvirtualalign,
+ CharUnits nonvirtualalignment,
CharUnits SizeOfLargestEmptySubobject,
const CXXRecordDecl *PrimaryBase,
bool IsPrimaryBaseVirtual,
const CXXRecordDecl *BaseSharingVBPtr,
- bool AlignAfterVBases,
+ bool HasZeroSizedSubObject,
+ bool LeadsWithZeroSizedBase,
const BaseOffsetsMapTy& BaseOffsets,
const VBaseOffsetsMapTy& VBaseOffsets)
- : Size(size), DataSize(datasize), Alignment(alignment), FieldOffsets(0),
+ : Size(size), DataSize(datasize), Alignment(alignment),
+ RequiredAlignment(requiredAlignment), FieldOffsets(0),
FieldCount(fieldcount), CXXInfo(new (Ctx) CXXRecordLayoutInfo)
{
if (FieldCount > 0) {
@@ -68,7 +74,7 @@
CXXInfo->PrimaryBase.setPointer(PrimaryBase);
CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual);
CXXInfo->NonVirtualSize = nonvirtualsize;
- CXXInfo->NonVirtualAlign = nonvirtualalign;
+ CXXInfo->NonVirtualAlignment = nonvirtualalignment;
CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
CXXInfo->BaseOffsets = BaseOffsets;
CXXInfo->VBaseOffsets = VBaseOffsets;
@@ -76,7 +82,8 @@
CXXInfo->VBPtrOffset = vbptroffset;
CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr;
CXXInfo->BaseSharingVBPtr = BaseSharingVBPtr;
- CXXInfo->AlignAfterVBases = AlignAfterVBases;
+ CXXInfo->HasZeroSizedSubObject = HasZeroSizedSubObject;
+ CXXInfo->LeadsWithZeroSizedBase = LeadsWithZeroSizedBase;
#ifndef NDEBUG
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index 4390e66..ce55d2e 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -140,10 +140,9 @@
void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
// Check the bases.
- for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
- E = Class->bases_end(); I != E; ++I) {
+ for (const auto &I : Class->bases()) {
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
CharUnits EmptySize;
const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
@@ -160,9 +159,7 @@
}
// Check the fields.
- for (CXXRecordDecl::field_iterator I = Class->field_begin(),
- E = Class->field_end(); I != E; ++I) {
-
+ for (const auto *I : Class->fields()) {
const RecordType *RT =
Context.getBaseElementType(I->getType())->getAs<RecordType>();
@@ -348,13 +345,12 @@
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// Traverse all non-virtual bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- if (I->isVirtual())
+ for (const auto &I : RD->bases()) {
+ if (I.isVirtual())
continue;
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset))
@@ -363,10 +359,9 @@
if (RD == Class) {
// This is the most derived class, traverse virtual bases as well.
- for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
- E = RD->vbases_end(); I != E; ++I) {
+ for (const auto &I : RD->vbases()) {
const CXXRecordDecl *VBaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset))
@@ -460,13 +455,12 @@
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// Traverse all non-virtual bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- if (I->isVirtual())
+ for (const auto &I : RD->bases()) {
+ if (I.isVirtual())
continue;
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
@@ -474,10 +468,9 @@
if (RD == Class) {
// This is the most derived class, traverse virtual bases as well.
- for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
- E = RD->vbases_end(); I != E; ++I) {
+ for (const auto &I : RD->vbases()) {
const CXXRecordDecl *VBaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
@@ -783,16 +776,15 @@
void
RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
+ for (const auto &I : RD->bases()) {
+ assert(!I.getType()->isDependentType() &&
"Cannot layout class with dependent bases.");
const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
// Check if this is a nearly empty virtual base.
- if (I->isVirtual() && Context.isNearlyEmpty(Base)) {
+ if (I.isVirtual() && Context.isNearlyEmpty(Base)) {
// If it's not an indirect primary base, then we've found our primary
// base.
if (!IndirectPrimaryBases.count(Base)) {
@@ -825,14 +817,13 @@
// If the record has a dynamic base class, attempt to choose a primary base
// class. It is the first (in direct base class order) non-virtual dynamic
// base class, if one exists.
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
+ for (const auto &I : RD->bases()) {
// Ignore virtual bases.
- if (i->isVirtual())
+ if (I.isVirtual())
continue;
const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
if (Base->isDynamicClass()) {
// We found it.
@@ -918,12 +909,11 @@
}
// Now go through all direct bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- bool IsVirtual = I->isVirtual();
+ for (const auto &I : RD->bases()) {
+ bool IsVirtual = I.isVirtual();
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
Info->Bases.push_back(ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, Info));
}
@@ -944,12 +934,11 @@
}
void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- bool IsVirtual = I->isVirtual();
+ for (const auto &I : RD->bases()) {
+ bool IsVirtual = I.isVirtual();
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
// Compute the base subobject info for this base.
BaseSubobjectInfo *Info = ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, 0);
@@ -1033,15 +1022,13 @@
}
// Now lay out the non-virtual bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
+ for (const auto &I : RD->bases()) {
// Ignore virtual bases.
- if (I->isVirtual())
+ if (I.isVirtual())
continue;
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
// Skip the primary base, because we've already laid it out. The
// !PrimaryBaseIsVirtual check is required because we might have a
@@ -1118,15 +1105,13 @@
PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
}
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
+ for (const auto &I : RD->bases()) {
+ assert(!I.getType()->isDependentType() &&
"Cannot layout class with dependent bases.");
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
- if (I->isVirtual()) {
+ if (I.isVirtual()) {
if (PrimaryBase != BaseDecl || !PrimaryBaseIsVirtual) {
bool IndirectPrimaryBase = IndirectPrimaryBases.count(BaseDecl);
@@ -1191,7 +1176,7 @@
}
}
- CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlign();
+ CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlignment();
CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
// If we have an empty base class, try to place it at offset 0.
@@ -1326,22 +1311,20 @@
#ifndef NDEBUG
// Check that we have base offsets for all bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- if (I->isVirtual())
+ for (const auto &I : RD->bases()) {
+ if (I.isVirtual())
continue;
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
assert(Bases.count(BaseDecl) && "Did not find base offset!");
}
// And all virtual bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
- E = RD->vbases_end(); I != E; ++I) {
+ for (const auto &I : RD->vbases()) {
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
assert(VBases.count(BaseDecl) && "Did not find base offset!");
}
@@ -1374,9 +1357,8 @@
void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
// Layout each field, for now, just sequentially, respecting alignment. In
// the future, this will need to be tweakable by targets.
- for (RecordDecl::field_iterator Field = D->field_begin(),
- FieldEnd = D->field_end(); Field != FieldEnd; ++Field)
- LayoutField(*Field);
+ for (const auto *Field : D->fields())
+ LayoutField(Field);
}
void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
@@ -1452,127 +1434,224 @@
uint64_t TypeSize = FieldInfo.first;
unsigned FieldAlign = FieldInfo.second;
+ // UnfilledBitsInLastUnit is the difference between the end of the
+ // last allocated bitfield (i.e. the first bit offset available for
+ // bitfields) and the end of the current data size in bits (i.e. the
+ // first bit offset available for non-bitfields). The current data
+ // size in bits is always a multiple of the char size; additionally,
+ // for ms_struct records it's also a multiple of the
+ // LastBitfieldTypeSize (if set).
+
+ // The struct-layout algorithm is dictated by the platform ABI,
+ // which in principle could use almost any rules it likes. In
+ // practice, UNIXy targets tend to inherit the algorithm described
+ // in the System V generic ABI. The basic bitfield layout rule in
+ // System V is to place bitfields at the next available bit offset
+ // where the entire bitfield would fit in an aligned storage unit of
+ // the declared type; it's okay if an earlier or later non-bitfield
+ // is allocated in the same storage unit. However, some targets
+ // (those that !useBitFieldTypeAlignment(), e.g. ARM APCS) don't
+ // require this storage unit to be aligned, and therefore always put
+ // the bitfield at the next available bit offset.
+
+ // ms_struct basically requests a complete replacement of the
+ // platform ABI's struct-layout algorithm, with the high-level goal
+ // of duplicating MSVC's layout. For non-bitfields, this follows
+  // the standard algorithm. The basic bitfield layout rule is to
+ // allocate an entire unit of the bitfield's declared type
+ // (e.g. 'unsigned long'), then parcel it up among successive
+ // bitfields whose declared types have the same size, making a new
+ // unit as soon as the last can no longer store the whole value.
+ // Since it completely replaces the platform ABI's algorithm,
+ // settings like !useBitFieldTypeAlignment() do not apply.
+
+ // A zero-width bitfield forces the use of a new storage unit for
+ // later bitfields. In general, this occurs by rounding up the
+ // current size of the struct as if the algorithm were about to
+ // place a non-bitfield of the field's formal type. Usually this
+ // does not change the alignment of the struct itself, but it does
+ // on some targets (those that useZeroLengthBitfieldAlignment(),
+ // e.g. ARM). In ms_struct layout, zero-width bitfields are
+ // ignored unless they follow a non-zero-width bitfield.
+
+ // A field alignment restriction (e.g. from #pragma pack) or
+ // specification (e.g. from __attribute__((aligned))) changes the
+ // formal alignment of the field. For System V, this alters the
+ // required alignment of the notional storage unit that must contain
+ // the bitfield. For ms_struct, this only affects the placement of
+ // new storage units. In both cases, the effect of #pragma pack is
+ // ignored on zero-width bitfields.
+
+ // On System V, a packed field (e.g. from #pragma pack or
+ // __attribute__((packed))) always uses the next available bit
+ // offset.
+
+ // In an ms_struct struct, the alignment of a fundamental type is
+ // always equal to its size. This is necessary in order to mimic
+ // the i386 alignment rules on targets which might not fully align
+ // all types (e.g. Darwin PPC32, where alignof(long long) == 4).
+
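To make the System V versus ms_struct difference concrete, the small probe below can be compiled both ways; the numbers given in its comment follow from the rules described above for a typical 32/64-bit x86 target and are illustrative rather than guaranteed.

#include <cstddef>
#include <cstdio>

// Under System V rules the 4-bit field can share the first int-sized storage
// unit with 'a', so 'c' lands at offset 2 and sizeof is 4. Under ms_struct /
// MSVC-style layout, 'b' opens a fresh, fully aligned 'int' unit at offset 4,
// pushing 'c' to offset 8 and sizeof to 12.
struct Example {
  char a;
  int b : 4;
  char c;
};

int main() {
  std::printf("sizeof=%zu offsetof(c)=%zu\n", sizeof(Example),
              offsetof(Example, c));
}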
+ // First, some simple bookkeeping to perform for ms_struct structs.
if (IsMsStruct) {
- // The field alignment for integer types in ms_struct structs is
- // always the size.
+ // The field alignment for integer types is always the size.
FieldAlign = TypeSize;
- // Ignore zero-length bitfields after non-bitfields in ms_struct structs.
- if (!FieldSize && !LastBitfieldTypeSize)
- FieldAlign = 1;
- // If a bitfield is followed by a bitfield of a different size, don't
- // pack the bits together in ms_struct structs.
+
+ // If the previous field was not a bitfield, or was a bitfield
+ // with a different storage unit size, we're done with that
+ // storage unit.
if (LastBitfieldTypeSize != TypeSize) {
+ // Also, ignore zero-length bitfields after non-bitfields.
+ if (!LastBitfieldTypeSize && !FieldSize)
+ FieldAlign = 1;
+
UnfilledBitsInLastUnit = 0;
LastBitfieldTypeSize = 0;
}
}
- uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit;
- uint64_t FieldOffset = IsUnion ? 0 : UnpaddedFieldOffset;
-
- bool ZeroLengthBitfield = false;
- if (!Context.getTargetInfo().useBitFieldTypeAlignment() &&
- Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
- FieldSize == 0) {
- // The alignment of a zero-length bitfield affects the alignment
- // of the next member. The alignment is the max of the zero
- // length bitfield's alignment and a target specific fixed value.
- ZeroLengthBitfield = true;
- unsigned ZeroLengthBitfieldBoundary =
- Context.getTargetInfo().getZeroLengthBitfieldBoundary();
- if (ZeroLengthBitfieldBoundary > FieldAlign)
- FieldAlign = ZeroLengthBitfieldBoundary;
- }
-
+ // If the field is wider than its declared type, it follows
+ // different rules in all cases.
if (FieldSize > TypeSize) {
LayoutWideBitField(FieldSize, TypeSize, FieldPacked, D);
return;
}
- // The align if the field is not packed. This is to check if the attribute
- // was unnecessary (-Wpacked).
+ // Compute the next available bit offset.
+ uint64_t FieldOffset =
+ IsUnion ? 0 : (getDataSizeInBits() - UnfilledBitsInLastUnit);
+
+ // Handle targets that don't honor bitfield type alignment.
+ if (!IsMsStruct && !Context.getTargetInfo().useBitFieldTypeAlignment()) {
+ // Some such targets do honor it on zero-width bitfields.
+ if (FieldSize == 0 &&
+ Context.getTargetInfo().useZeroLengthBitfieldAlignment()) {
+ // The alignment to round up to is the max of the field's natural
+ // alignment and a target-specific fixed value (sometimes zero).
+ unsigned ZeroLengthBitfieldBoundary =
+ Context.getTargetInfo().getZeroLengthBitfieldBoundary();
+ FieldAlign = std::max(FieldAlign, ZeroLengthBitfieldBoundary);
+
+ // If that doesn't apply, just ignore the field alignment.
+ } else {
+ FieldAlign = 1;
+ }
+ }
+
+ // Remember the alignment we would have used if the field were not packed.
unsigned UnpackedFieldAlign = FieldAlign;
- uint64_t UnpackedFieldOffset = FieldOffset;
- if (!Context.getTargetInfo().useBitFieldTypeAlignment() && !ZeroLengthBitfield)
- UnpackedFieldAlign = 1;
- if (FieldPacked ||
- (!Context.getTargetInfo().useBitFieldTypeAlignment() && !ZeroLengthBitfield))
+ // Ignore the field alignment if the field is packed unless it has zero-size.
+ if (!IsMsStruct && FieldPacked && FieldSize != 0)
FieldAlign = 1;
- FieldAlign = std::max(FieldAlign, D->getMaxAlignment());
- UnpackedFieldAlign = std::max(UnpackedFieldAlign, D->getMaxAlignment());
- // The maximum field alignment overrides the aligned attribute.
- if (!MaxFieldAlignment.isZero() && FieldSize != 0) {
+ // But, if there's an 'aligned' attribute on the field, honor that.
+ if (unsigned ExplicitFieldAlign = D->getMaxAlignment()) {
+ FieldAlign = std::max(FieldAlign, ExplicitFieldAlign);
+ UnpackedFieldAlign = std::max(UnpackedFieldAlign, ExplicitFieldAlign);
+ }
+
+  // But, if there's a #pragma pack in play, that takes precedence over
+ // even the 'aligned' attribute, for non-zero-width bitfields.
+ if (!MaxFieldAlignment.isZero() && FieldSize) {
unsigned MaxFieldAlignmentInBits = Context.toBits(MaxFieldAlignment);
FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits);
}
- // ms_struct bitfields always have to start at a round alignment.
- if (IsMsStruct && !LastBitfieldTypeSize) {
- FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
- UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
- UnpackedFieldAlign);
+ // For purposes of diagnostics, we're going to simultaneously
+ // compute the field offsets that we would have used if we weren't
+ // adding any alignment padding or if the field weren't packed.
+ uint64_t UnpaddedFieldOffset = FieldOffset;
+ uint64_t UnpackedFieldOffset = FieldOffset;
+
+ // Check if we need to add padding to fit the bitfield within an
+ // allocation unit with the right size and alignment. The rules are
+ // somewhat different here for ms_struct structs.
+ if (IsMsStruct) {
+ // If it's not a zero-width bitfield, and we can fit the bitfield
+ // into the active storage unit (and we haven't already decided to
+ // start a new storage unit), just do so, regardless of any other
+    // consideration. Otherwise, round up to the right alignment.
+ if (FieldSize == 0 || FieldSize > UnfilledBitsInLastUnit) {
+ FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
+ UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
+ UnpackedFieldAlign);
+ UnfilledBitsInLastUnit = 0;
+ }
+
+ } else {
+ // #pragma pack, with any value, suppresses the insertion of padding.
+ bool AllowPadding = MaxFieldAlignment.isZero();
+
+ // Compute the real offset.
+ if (FieldSize == 0 ||
+ (AllowPadding &&
+ (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)) {
+ FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
+ }
+
+ // Repeat the computation for diagnostic purposes.
+ if (FieldSize == 0 ||
+ (AllowPadding &&
+ (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize))
+ UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
+ UnpackedFieldAlign);
}
- // Check if we need to add padding to give the field the correct alignment.
- if (FieldSize == 0 ||
- (MaxFieldAlignment.isZero() &&
- (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize))
- FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
-
- if (FieldSize == 0 ||
- (MaxFieldAlignment.isZero() &&
- (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize))
- UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
- UnpackedFieldAlign);
-
- // Padding members don't affect overall alignment, unless zero length bitfield
- // alignment is enabled.
- if (!D->getIdentifier() &&
- !Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
- !IsMsStruct)
- FieldAlign = UnpackedFieldAlign = 1;
-
+ // If we're using external layout, give the external layout a chance
+ // to override this information.
if (ExternalLayout)
FieldOffset = updateExternalFieldOffset(D, FieldOffset);
- // Place this field at the current location.
+ // Okay, place the bitfield at the calculated offset.
FieldOffsets.push_back(FieldOffset);
+ // Bookkeeping:
+
+ // Anonymous members don't affect the overall record alignment,
+ // except on targets where they do.
+ if (!IsMsStruct &&
+ !Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
+ !D->getIdentifier())
+ FieldAlign = UnpackedFieldAlign = 1;
+
+ // Diagnose differences in layout due to padding or packing.
if (!ExternalLayout)
CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset,
UnpackedFieldAlign, FieldPacked, D);
// Update DataSize to include the last byte containing (part of) the bitfield.
+
+ // For unions, this is just a max operation, as usual.
if (IsUnion) {
// FIXME: I think FieldSize should be TypeSize here.
setDataSize(std::max(getDataSizeInBits(), FieldSize));
- } else {
- if (IsMsStruct && FieldSize) {
- // Under ms_struct, a bitfield always takes up space equal to the size
- // of the type. We can't just change the alignment computation on the
- // other codepath because of the way this interacts with #pragma pack:
- // in a packed struct, we need to allocate misaligned space in the
- // struct to hold the bitfield.
- if (!UnfilledBitsInLastUnit) {
- setDataSize(FieldOffset + TypeSize);
- UnfilledBitsInLastUnit = TypeSize - FieldSize;
- } else if (UnfilledBitsInLastUnit < FieldSize) {
- setDataSize(getDataSizeInBits() + TypeSize);
- UnfilledBitsInLastUnit = TypeSize - FieldSize;
- } else {
- UnfilledBitsInLastUnit -= FieldSize;
- }
- LastBitfieldTypeSize = TypeSize;
- } else {
- uint64_t NewSizeInBits = FieldOffset + FieldSize;
- uint64_t BitfieldAlignment = Context.getTargetInfo().getCharAlign();
- setDataSize(llvm::RoundUpToAlignment(NewSizeInBits, BitfieldAlignment));
- UnfilledBitsInLastUnit = getDataSizeInBits() - NewSizeInBits;
- LastBitfieldTypeSize = 0;
+
+ // For non-zero-width bitfields in ms_struct structs, allocate a new
+ // storage unit if necessary.
+ } else if (IsMsStruct && FieldSize) {
+ // We should have cleared UnfilledBitsInLastUnit in every case
+ // where we changed storage units.
+ if (!UnfilledBitsInLastUnit) {
+ setDataSize(FieldOffset + TypeSize);
+ UnfilledBitsInLastUnit = TypeSize;
}
+ UnfilledBitsInLastUnit -= FieldSize;
+ LastBitfieldTypeSize = TypeSize;
+
+ // Otherwise, bump the data size up to include the bitfield,
+  // including padding up to char alignment, and then remember how many
+  // bits we didn't use.
+ } else {
+ uint64_t NewSizeInBits = FieldOffset + FieldSize;
+ uint64_t CharAlignment = Context.getTargetInfo().getCharAlign();
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits, CharAlignment));
+ UnfilledBitsInLastUnit = getDataSizeInBits() - NewSizeInBits;
+
+ // The only time we can get here for an ms_struct is if this is a
+ // zero-width bitfield, which doesn't count as anything for the
+ // purposes of unfilled bits.
+ LastBitfieldTypeSize = 0;
}
// Update the size.
@@ -1878,20 +1957,18 @@
if (!RD->isExternallyVisible())
return 0;
- // Template instantiations don't have key functions,see Itanium C++ ABI 5.2.6.
+ // Template instantiations don't have key functions per Itanium C++ ABI 5.2.6.
// Same behavior as GCC.
TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDeclaration ||
TSK == TSK_ExplicitInstantiationDefinition)
return 0;
bool allowInlineFunctions =
Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline();
- for (CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
-
+ for (const auto *MD : RD->methods()) {
if (!MD->isVirtual())
continue;
@@ -1985,7 +2062,7 @@
//
// * The alignment of bitfields in unions is ignored when computing the
// alignment of the union.
-// * The existance of zero-width bitfield that occurs after anything other than
+// * The existence of a zero-width bitfield that occurs after anything other than
// a non-zero length bitfield is ignored.
// * The Itanium equivalent vtable pointers are split into a vfptr (virtual
// function pointer) and a vbptr (virtual base pointer). They can each be
@@ -2014,15 +2091,32 @@
// one.
// * The last zero size virtual base may be placed at the end of the struct.
// and can potentially alias a zero sized type in the next struct.
-// * If the last field is a non-zero length bitfield and we have any virtual
-// bases then some extra padding is added before the virtual bases for no
-// obvious reason.
+// * If the last field is a non-zero length bitfield, all virtual bases will
+// have extra padding added before them for no obvious reason. The padding
+// has the same number of bits as the type of the bitfield.
// * When laying out empty non-virtual bases, an extra byte of padding is added
// if the non-virtual base before the empty non-virtual base has a vbptr.
-
+// * The ABI attempts to avoid aliasing of zero sized bases by adding padding
+//   between bases or vbases with specific properties. The criterion for
+//   additional padding between two bases is that the first base is zero sized
+//   or has a zero sized subobject and the second base is zero sized or leads
+//   with a zero sized base (sharing of vfptrs can reorder the layout of bases,
+//   so the leading base is not always the first one declared). The padding
+// added for bases is 1 byte. The padding added for vbases depends on the
+// alignment of the object but is at least 4 bytes (in both 32 and 64 bit
+// modes).
+// * There is no concept of non-virtual alignment or any distinction between
+// data size and non-virtual size.
+// * __declspec(align) on bitfields has the effect of changing the bitfield's
+//   alignment instead of its required alignment. This has implications for how
+//   it interacts with pragma pack.
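The zero-sized-base rule a few bullets up ("the padding added for bases is 1 byte") can be observed with a probe like the one below when targeting the Microsoft ABI; it only prints sizes, since the exact totals are ABI-dependent and are not asserted here.

#include <cstdio>

struct Empty1 {};
struct Empty2 {};
// When targeting the Microsoft ABI, extra padding is expected between the two
// empty bases so they do not alias; the Itanium ABI may instead place both at
// offset 0 because they have different types.
struct Derived : Empty1, Empty2 {
  char c;
};

int main() {
  std::printf("sizeof(Derived)=%zu\n", sizeof(Derived));
}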
namespace {
struct MicrosoftRecordLayoutBuilder {
+ struct ElementInfo {
+ CharUnits Size;
+ CharUnits Alignment;
+ };
typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
MicrosoftRecordLayoutBuilder(const ASTContext &Context) : Context(Context) {}
private:
@@ -2030,19 +2124,20 @@
LLVM_DELETED_FUNCTION;
void operator=(const MicrosoftRecordLayoutBuilder &) LLVM_DELETED_FUNCTION;
public:
-
void layout(const RecordDecl *RD);
void cxxLayout(const CXXRecordDecl *RD);
/// \brief Initializes size and alignment and honors some flags.
void initializeLayout(const RecordDecl *RD);
/// \brief Initialized C++ layout, compute alignment and virtual alignment and
- /// existance of vfptrs and vbptrs. Alignment is needed before the vfptr is
+ /// existence of vfptrs and vbptrs. Alignment is needed before the vfptr is
/// laid out.
void initializeCXXLayout(const CXXRecordDecl *RD);
- void layoutVFPtr(const CXXRecordDecl *RD);
void layoutNonVirtualBases(const CXXRecordDecl *RD);
- void layoutNonVirtualBase(const CXXRecordDecl *RD);
- void layoutVBPtr(const CXXRecordDecl *RD);
+ void layoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
+ const ASTRecordLayout &BaseLayout,
+ const ASTRecordLayout *&PreviousBaseLayout);
+ void injectVFPtr(const CXXRecordDecl *RD);
+ void injectVBPtr(const CXXRecordDecl *RD);
/// \brief Lays out the fields of the record. Also rounds size up to
/// alignment.
void layoutFields(const RecordDecl *RD);
@@ -2052,22 +2147,15 @@
/// special cases associated with zero-width bit-fields.
void layoutZeroWidthBitField(const FieldDecl *FD);
void layoutVirtualBases(const CXXRecordDecl *RD);
- void layoutVirtualBase(const CXXRecordDecl *RD, bool HasVtordisp);
- /// \brief Flushes the lazy virtual base and conditionally rounds up to
- /// alignment.
- void finalizeCXXLayout(const CXXRecordDecl *RD);
- void honorDeclspecAlign(const RecordDecl *RD);
-
- /// \brief Updates the alignment of the type. This function doesn't take any
- /// properties (such as packedness) into account. getAdjustedFieldInfo()
- /// adjustes for packedness.
- void updateAlignment(CharUnits NewAlignment) {
- Alignment = std::max(Alignment, NewAlignment);
- }
- /// \brief Gets the size and alignment taking attributes into account.
- std::pair<CharUnits, CharUnits> getAdjustedFieldInfo(const FieldDecl *FD);
- /// \brief Places a field at offset 0.
- void placeFieldAtZero() { FieldOffsets.push_back(0); }
+ void finalizeLayout(const RecordDecl *RD);
+ /// \brief Gets the size and alignment of a base taking pragma pack and
+ /// __declspec(align) into account.
+ ElementInfo getAdjustedElementInfo(const ASTRecordLayout &Layout,
+ bool AsBase = true);
+ /// \brief Gets the size and alignment of a field taking pragma pack and
+ /// __declspec(align) into account. It also updates RequiredAlignment as a
+ /// side effect because it is most convenient to do so here.
+ ElementInfo getAdjustedElementInfo(const FieldDecl *FD);
/// \brief Places a field at an offset in CharUnits.
void placeFieldAtOffset(CharUnits FieldOffset) {
FieldOffsets.push_back(Context.toBits(FieldOffset));
@@ -2079,334 +2167,289 @@
/// \brief Compute the set of virtual bases for which vtordisps are required.
llvm::SmallPtrSet<const CXXRecordDecl *, 2>
computeVtorDispSet(const CXXRecordDecl *RD);
-
const ASTContext &Context;
/// \brief The size of the record being laid out.
CharUnits Size;
+ /// \brief The non-virtual size of the record layout.
+ CharUnits NonVirtualSize;
+ /// \brief The data size of the record layout.
+ CharUnits DataSize;
/// \brief The current alignment of the record layout.
CharUnits Alignment;
- /// \brief The collection of field offsets.
- SmallVector<uint64_t, 16> FieldOffsets;
/// \brief The maximum allowed field alignment. This is set by #pragma pack.
CharUnits MaxFieldAlignment;
- /// \brief Alignment does not occur for virtual bases unless something
- /// forces it to by explicitly using __declspec(align())
- bool AlignAfterVBases : 1;
- bool IsUnion : 1;
- /// \brief True if the last field laid out was a bitfield and was not 0
- /// width.
- bool LastFieldIsNonZeroWidthBitfield : 1;
+ /// \brief The alignment that this record must obey. This is imposed by
+ /// __declspec(align()) on the record itself or one of its fields or bases.
+ CharUnits RequiredAlignment;
/// \brief The size of the allocation of the currently active bitfield.
/// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield
/// is true.
CharUnits CurrentBitfieldSize;
- /// \brief The number of remaining bits in our last bitfield allocation.
- /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield is
- /// true.
- unsigned RemainingBitsInField;
-
- /// \brief The data alignment of the record layout.
- CharUnits DataSize;
- /// \brief The alignment of the non-virtual portion of the record layout
- /// without the impact of the virtual pointers.
- /// Only used for C++ layouts.
- CharUnits BasesAndFieldsAlignment;
- /// \brief The alignment of the non-virtual portion of the record layout
- /// Only used for C++ layouts.
- CharUnits NonVirtualAlignment;
- /// \brief The additional alignment imposed by the virtual bases.
- CharUnits VirtualAlignment;
+ /// \brief Offset to the virtual base table pointer (if one exists).
+ CharUnits VBPtrOffset;
+ /// \brief The size and alignment info of a pointer.
+ ElementInfo PointerInfo;
/// \brief The primary base class (if one exists).
const CXXRecordDecl *PrimaryBase;
/// \brief The class we share our vb-pointer with.
const CXXRecordDecl *SharedVBPtrBase;
- /// \brief True if the class has a vftable pointer that can be extended
- /// by this class or classes derived from it. Such a vfptr will always occur
- /// at offset 0.
- bool HasExtendableVFPtr : 1;
- /// \brief True if the class has a (not necessarily its own) vbtable pointer.
- bool HasVBPtr : 1;
- /// \brief Offset to the virtual base table pointer (if one exists).
- CharUnits VBPtrOffset;
+ /// \brief The collection of field offsets.
+ SmallVector<uint64_t, 16> FieldOffsets;
/// \brief Base classes and their offsets in the record.
BaseOffsetsMapTy Bases;
/// \brief virtual base classes and their offsets in the record.
ASTRecordLayout::VBaseOffsetsMapTy VBases;
- /// \brief The size of a pointer.
- CharUnits PointerSize;
- /// \brief The alignment of a pointer.
- CharUnits PointerAlignment;
- /// \brief Holds an empty base we haven't yet laid out.
- const CXXRecordDecl *LazyEmptyBase;
- /// \brief Lets us know if the last base we laid out was empty. Only used
- /// when adjusting the placement of a last zero-sized base in 64 bit mode.
- bool LastBaseWasEmpty;
+ /// \brief The number of remaining bits in our last bitfield allocation.
+ /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield is
+ /// true.
+ unsigned RemainingBitsInField;
+ bool IsUnion : 1;
+ /// \brief True if the last field laid out was a bitfield and was not 0
+ /// width.
+ bool LastFieldIsNonZeroWidthBitfield : 1;
+ /// \brief True if the class has its own vftable pointer.
+ bool HasOwnVFPtr : 1;
+ /// \brief True if the class has a vbtable pointer.
+ bool HasVBPtr : 1;
/// \brief Lets us know if we're in 64-bit mode
- bool Is64BitMode;
- /// \brief True if the last non-virtual base has a vbptr.
- bool LastNonVirtualBaseHasVBPtr;
+ bool Is64BitMode : 1;
+ /// \brief True if this class contains a zero sized member or base or a base
+ /// with a zero sized member or base. Only used for MS-ABI.
+ bool HasZeroSizedSubObject : 1;
+ /// \brief True if this class is zero sized or first base is zero sized or
+ /// has this property. Only used for MS-ABI.
+ bool LeadsWithZeroSizedBase : 1;
};
} // namespace
-std::pair<CharUnits, CharUnits>
-MicrosoftRecordLayoutBuilder::getAdjustedFieldInfo(const FieldDecl *FD) {
- std::pair<CharUnits, CharUnits> FieldInfo =
- Context.getTypeInfoInChars(FD->getType());
-
- // If we're not on win32 and using ms_struct the field alignment will be wrong
- // for 64 bit types, so we fix that here.
- if (FD->getASTContext().getTargetInfo().getTriple().getOS() !=
- llvm::Triple::Win32) {
- QualType T = Context.getBaseElementType(FD->getType());
- if (const BuiltinType *BTy = T->getAs<BuiltinType>()) {
- CharUnits TypeSize = Context.getTypeSizeInChars(BTy);
- if (TypeSize > FieldInfo.second)
- FieldInfo.second = TypeSize;
- }
- }
-
- // Respect packed attribute.
- if (FD->hasAttr<PackedAttr>())
- FieldInfo.second = CharUnits::One();
- // Respect pack pragma.
- else if (!MaxFieldAlignment.isZero())
- FieldInfo.second = std::min(FieldInfo.second, MaxFieldAlignment);
- // Respect alignment attributes.
- if (unsigned fieldAlign = FD->getMaxAlignment()) {
- CharUnits FieldAlign = Context.toCharUnitsFromBits(fieldAlign);
- AlignAfterVBases = true;
- FieldInfo.second = std::max(FieldInfo.second, FieldAlign);
- }
- return FieldInfo;
+MicrosoftRecordLayoutBuilder::ElementInfo
+MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
+ const ASTRecordLayout &Layout, bool AsBase) {
+ ElementInfo Info;
+ Info.Alignment = Layout.getAlignment();
+ // Respect pragma pack.
+ if (!MaxFieldAlignment.isZero())
+ Info.Alignment = std::min(Info.Alignment, MaxFieldAlignment);
+ // Track zero-sized subobjects here where it's already available.
+ if (Layout.hasZeroSizedSubObject())
+ HasZeroSizedSubObject = true;
+  // Respect required alignment; this is necessary because we may have adjusted
+  // the alignment in the case of pragma pack. Note that the required alignment
+ // doesn't actually apply to the struct alignment at this point.
+ Alignment = std::max(Alignment, Info.Alignment);
+ Info.Alignment = std::max(Info.Alignment, Layout.getRequiredAlignment());
+ Info.Size = AsBase ? Layout.getNonVirtualSize() : Layout.getSize();
+ return Info;
}
-void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) {
- IsUnion = RD->isUnion();
- Is64BitMode = Context.getTargetInfo().getPointerWidth(0) == 64;
-
- Size = CharUnits::Zero();
- Alignment = CharUnits::One();
- AlignAfterVBases = false;
-
- // Compute the maximum field alignment.
- MaxFieldAlignment = CharUnits::Zero();
- // Honor the default struct packing maximum alignment flag.
- if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct)
- MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
- // Honor the packing attribute.
- if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr<MaxFieldAlignmentAttr>())
- MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment());
- // Packed attribute forces max field alignment to be 1.
- if (RD->hasAttr<PackedAttr>())
- MaxFieldAlignment = CharUnits::One();
+MicrosoftRecordLayoutBuilder::ElementInfo
+MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
+ const FieldDecl *FD) {
+ ElementInfo Info;
+ std::tie(Info.Size, Info.Alignment) =
+ Context.getTypeInfoInChars(FD->getType());
+ // Respect align attributes.
+ CharUnits FieldRequiredAlignment =
+ Context.toCharUnitsFromBits(FD->getMaxAlignment());
+ // Respect attributes applied to subobjects of the field.
+ if (const RecordType *RT =
+ FD->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RT->getDecl());
+ // Get the element info for a layout, respecting pack.
+ Info.Alignment = getAdjustedElementInfo(Layout, false).Alignment;
+ // Capture required alignment as a side-effect.
+ RequiredAlignment = std::max(RequiredAlignment,
+ Layout.getRequiredAlignment());
+ } else {
+ if (FD->isBitField() && FD->getMaxAlignment() != 0)
+ Info.Alignment = std::max(Info.Alignment, FieldRequiredAlignment);
+ // Respect pragma pack.
+ if (!MaxFieldAlignment.isZero())
+ Info.Alignment = std::min(Info.Alignment, MaxFieldAlignment);
+ }
+ // Respect packed field attribute.
+ if (FD->hasAttr<PackedAttr>())
+ Info.Alignment = CharUnits::One();
+ // Take required alignment into account. __declspec(align) on bitfields
+ // impacts the alignment rather than the required alignment.
+ if (!FD->isBitField()) {
+ Info.Alignment = std::max(Info.Alignment, FieldRequiredAlignment);
+ // Capture required alignment as a side-effect.
+ RequiredAlignment = std::max(RequiredAlignment, FieldRequiredAlignment);
+ }
+ // TODO: Add a Sema warning that MS ignores bitfield alignment in unions.
+ if (!(FD->isBitField() && IsUnion)) {
+ Alignment = std::max(Alignment, Info.Alignment);
+ if (!MaxFieldAlignment.isZero())
+ Alignment = std::min(Alignment, MaxFieldAlignment);
+ }
+ return Info;
}
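One way to observe the interaction this function encodes, where pragma pack caps the natural alignment while an explicit alignment request raises it and feeds RequiredAlignment, is the probe below; the printed values differ between compilers and ABIs, so it is illustrative only.

#include <cstdio>

#pragma pack(push, 1)
struct PackedButAligned {
  char c;
  alignas(8) long long l;  // explicit alignment request on the field
};
#pragma pack(pop)

#pragma pack(push, 1)
struct JustPacked {
  char c;
  long long l;             // only the pack(1) cap applies
};
#pragma pack(pop)

int main() {
  std::printf("PackedButAligned: sizeof=%zu alignof=%zu\n",
              sizeof(PackedButAligned), alignof(PackedButAligned));
  std::printf("JustPacked:       sizeof=%zu alignof=%zu\n",
              sizeof(JustPacked), alignof(JustPacked));
}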
void MicrosoftRecordLayoutBuilder::layout(const RecordDecl *RD) {
initializeLayout(RD);
layoutFields(RD);
- honorDeclspecAlign(RD);
+ DataSize = Size = Size.RoundUpToAlignment(Alignment);
+ RequiredAlignment = std::max(
+ RequiredAlignment, Context.toCharUnitsFromBits(RD->getMaxAlignment()));
+ finalizeLayout(RD);
}
void MicrosoftRecordLayoutBuilder::cxxLayout(const CXXRecordDecl *RD) {
initializeLayout(RD);
initializeCXXLayout(RD);
- layoutVFPtr(RD);
layoutNonVirtualBases(RD);
- layoutVBPtr(RD);
layoutFields(RD);
- DataSize = Size;
- NonVirtualAlignment = Alignment;
+ injectVBPtr(RD);
+ injectVFPtr(RD);
+ if (HasOwnVFPtr || (HasVBPtr && !SharedVBPtrBase))
+ Alignment = std::max(Alignment, PointerInfo.Alignment);
+ NonVirtualSize = Size = Size.RoundUpToAlignment(Alignment);
+ RequiredAlignment = std::max(
+ RequiredAlignment, Context.toCharUnitsFromBits(RD->getMaxAlignment()));
layoutVirtualBases(RD);
- finalizeCXXLayout(RD);
- honorDeclspecAlign(RD);
+ finalizeLayout(RD);
+}
+
+void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) {
+ IsUnion = RD->isUnion();
+ Is64BitMode = Context.getTargetInfo().getPointerWidth(0) == 64;
+ Size = CharUnits::Zero();
+ Alignment = CharUnits::One();
+ // In 64-bit mode we always perform an alignment step after laying out vbases.
+ // In 32-bit mode we do not. The check to see if we need to perform alignment
+ // checks the RequiredAlignment field and performs alignment if it isn't 0.
+ RequiredAlignment = Is64BitMode ? CharUnits::One() : CharUnits::Zero();
+ // Compute the maximum field alignment.
+ MaxFieldAlignment = CharUnits::Zero();
+ // Honor the default struct packing maximum alignment flag.
+ if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct)
+ MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
+  // Honor the packing attribute. The MS-ABI ignores pragma pack if it's larger
+ // than the pointer size.
+ if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr<MaxFieldAlignmentAttr>()){
+ unsigned PackedAlignment = MFAA->getAlignment();
+ if (PackedAlignment <= Context.getTargetInfo().getPointerWidth(0))
+ MaxFieldAlignment = Context.toCharUnitsFromBits(PackedAlignment);
+ }
+ // Packed attribute forces max field alignment to be 1.
+ if (RD->hasAttr<PackedAttr>())
+ MaxFieldAlignment = CharUnits::One();
}
void
MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) {
- // Calculate pointer size and alignment.
- PointerSize =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- PointerAlignment = PointerSize;
- if (!MaxFieldAlignment.isZero())
- PointerAlignment = std::min(PointerAlignment, MaxFieldAlignment);
-
- // Initialize information about the bases.
+ HasZeroSizedSubObject = false;
+ LeadsWithZeroSizedBase = false;
+ HasOwnVFPtr = false;
HasVBPtr = false;
- HasExtendableVFPtr = false;
- SharedVBPtrBase = 0;
PrimaryBase = 0;
- VirtualAlignment = CharUnits::One();
- AlignAfterVBases = Is64BitMode;
-
- // If the record has a dynamic base class, attempt to choose a primary base
- // class. It is the first (in direct base class order) non-virtual dynamic
- // base class, if one exists.
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end();
- i != e; ++i) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
- // Handle forced alignment.
- if (Layout.getAlignAfterVBases())
- AlignAfterVBases = true;
- // Handle virtual bases.
- if (i->isVirtual()) {
- VirtualAlignment = std::max(VirtualAlignment, Layout.getAlignment());
- HasVBPtr = true;
- continue;
- }
- // We located a primary base class!
- if (!PrimaryBase && Layout.hasExtendableVFPtr()) {
- PrimaryBase = BaseDecl;
- HasExtendableVFPtr = true;
- }
- // We located a base to share a VBPtr with!
- if (!SharedVBPtrBase && Layout.hasVBPtr()) {
- SharedVBPtrBase = BaseDecl;
- HasVBPtr = true;
- }
- updateAlignment(Layout.getAlignment());
- }
-
- // Use LayoutFields to compute the alignment of the fields. The layout
- // is discarded. This is the simplest way to get all of the bit-field
- // behavior correct and is not actually very expensive.
- layoutFields(RD);
- Size = CharUnits::Zero();
- BasesAndFieldsAlignment = Alignment;
- FieldOffsets.clear();
-}
-
-void MicrosoftRecordLayoutBuilder::layoutVFPtr(const CXXRecordDecl *RD) {
- // If we have a primary base then our VFPtr was already laid out
- if (PrimaryBase)
- return;
-
- // Look at all of our methods to determine if we need a VFPtr. We need a
- // vfptr if we define a new virtual function.
- if (!HasExtendableVFPtr && RD->isDynamicClass())
- for (CXXRecordDecl::method_iterator i = RD->method_begin(),
- e = RD->method_end();
- !HasExtendableVFPtr && i != e; ++i)
- HasExtendableVFPtr = i->isVirtual() && i->size_overridden_methods() == 0;
- if (!HasExtendableVFPtr)
- return;
-
- // MSVC 32 (but not 64) potentially over-aligns the vf-table pointer by giving
- // it the max alignment of all the non-virtual data in the class. The
- // resulting layout is essentially { vftbl, { nvdata } }. This is completely
- // unnecessary, but we're not here to pass judgment.
- updateAlignment(PointerAlignment);
- if (Is64BitMode)
- Size = Size.RoundUpToAlignment(PointerAlignment) + PointerSize;
- else
- Size = Size.RoundUpToAlignment(PointerAlignment) + Alignment;
+ SharedVBPtrBase = 0;
+  // Calculate pointer size and alignment. These are used for vfptr and vbptr
+ // injection.
+ PointerInfo.Size =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ PointerInfo.Alignment = PointerInfo.Size;
+ // Respect pragma pack.
+ if (!MaxFieldAlignment.isZero())
+ PointerInfo.Alignment = std::min(PointerInfo.Alignment, MaxFieldAlignment);
}
void
MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) {
- LazyEmptyBase = 0;
- LastBaseWasEmpty = false;
- LastNonVirtualBaseHasVBPtr = false;
-
- // Lay out the primary base first.
- if (PrimaryBase)
- layoutNonVirtualBase(PrimaryBase);
-
+ // The MS-ABI lays out all bases that contain leading vfptrs before it lays
+ // out any bases that do not contain vfptrs. We implement this as two passes
+ // over the bases. This approach guarantees that the primary base is laid out
+ // first. We use these passes to calculate some additional aggregated
+ // information about the bases, such as required alignment and the presence of
+ // zero sized members.
+ const ASTRecordLayout* PreviousBaseLayout = 0;
// Iterate through the bases and lay out the non-virtual ones.
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end();
- i != e; ++i) {
- if (i->isVirtual())
+ for (const auto &I : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+ // Mark and skip virtual bases.
+ if (I.isVirtual()) {
+ HasVBPtr = true;
continue;
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(i->getType()->castAs<RecordType>()->getDecl());
- if (BaseDecl != PrimaryBase)
- layoutNonVirtualBase(BaseDecl);
- }
-}
-
-void
-MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(const CXXRecordDecl *RD) {
- const ASTRecordLayout *Layout = RD ? &Context.getASTRecordLayout(RD) : 0;
-
- // If we have a lazy empty base we haven't laid out yet, do that now.
- if (LazyEmptyBase) {
- const ASTRecordLayout &LazyLayout =
- Context.getASTRecordLayout(LazyEmptyBase);
- Size = Size.RoundUpToAlignment(LazyLayout.getAlignment());
- // If the last non-virtual base has a vbptr we add a byte of padding for no
- // obvious reason.
- if (LastNonVirtualBaseHasVBPtr)
- Size++;
- Bases.insert(std::make_pair(LazyEmptyBase, Size));
- // Empty bases only consume space when followed by another empty base.
- if (RD && Layout->getNonVirtualSize().isZero()) {
- LastBaseWasEmpty = true;
- Size++;
}
- LazyEmptyBase = 0;
- LastNonVirtualBaseHasVBPtr = false;
+ // Track RequiredAlignment for all bases in this pass.
+ RequiredAlignment = std::max(RequiredAlignment,
+ BaseLayout.getRequiredAlignment());
+ // Check for a base to share a VBPtr with.
+ if (!SharedVBPtrBase && BaseLayout.hasVBPtr()) {
+ SharedVBPtrBase = BaseDecl;
+ HasVBPtr = true;
+ }
+ // Only lay out bases with extendable VFPtrs on the first pass.
+ if (!BaseLayout.hasExtendableVFPtr())
+ continue;
+ // If we don't have a primary base, this one qualifies.
+ if (!PrimaryBase) {
+ PrimaryBase = BaseDecl;
+ LeadsWithZeroSizedBase = BaseLayout.leadsWithZeroSizedBase();
+ }
+ // Lay out the base.
+ layoutNonVirtualBase(BaseDecl, BaseLayout, PreviousBaseLayout);
}
-
- // RD is null when flushing the final lazy base.
- if (!RD)
- return;
-
- if (Layout->getNonVirtualSize().isZero()) {
- LazyEmptyBase = RD;
- return;
+ // Figure out if we need a fresh VFPtr for this class.
+ if (!PrimaryBase && RD->isDynamicClass())
+ for (CXXRecordDecl::method_iterator i = RD->method_begin(),
+ e = RD->method_end();
+ !HasOwnVFPtr && i != e; ++i)
+ HasOwnVFPtr = i->isVirtual() && i->size_overridden_methods() == 0;
+ // If we don't have a primary base then we have a leading object that could
+ // itself lead with a zero-sized object, something we track.
+ bool CheckLeadingLayout = !PrimaryBase;
+ // Iterate through the bases and lay out the non-virtual ones.
+ for (const auto &I : RD->bases()) {
+ if (I.isVirtual())
+ continue;
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+ // Only lay out bases without extendable VFPtrs on the second pass.
+ if (BaseLayout.hasExtendableVFPtr())
+ continue;
+ // If this is the first layout, check to see if it leads with a zero sized
+ // object. If it does, so do we.
+ if (CheckLeadingLayout) {
+ CheckLeadingLayout = false;
+ LeadsWithZeroSizedBase = BaseLayout.leadsWithZeroSizedBase();
+ }
+ // Lay out the base.
+ layoutNonVirtualBase(BaseDecl, BaseLayout, PreviousBaseLayout);
}
-
- // Insert the base here.
- CharUnits BaseOffset = Size.RoundUpToAlignment(Layout->getAlignment());
- Bases.insert(std::make_pair(RD, BaseOffset));
- Size = BaseOffset + Layout->getDataSize();
- // Note: we don't update alignment here because it was accounted
- // for during initalization.
- LastBaseWasEmpty = false;
- LastNonVirtualBaseHasVBPtr = Layout->hasVBPtr();
-}
-
-void MicrosoftRecordLayoutBuilder::layoutVBPtr(const CXXRecordDecl *RD) {
+ // Set our VBPtrOffset if we know it at this point.
if (!HasVBPtr)
VBPtrOffset = CharUnits::fromQuantity(-1);
else if (SharedVBPtrBase) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(SharedVBPtrBase);
VBPtrOffset = Bases[SharedVBPtrBase] + Layout.getVBPtrOffset();
- } else {
- VBPtrOffset = Size.RoundUpToAlignment(PointerAlignment);
- CharUnits OldSize = Size;
- Size = VBPtrOffset + PointerSize;
- if (BasesAndFieldsAlignment <= PointerAlignment) {
- // Handle strange padding rules for the lazily placed base. I have no
- // explanation for why the last virtual base is padded in such an odd way.
- // Two things to note about this padding are that the rules are different
- // if the alignment of the bases+fields is <= to the alignemnt of a
- // pointer and that the rule in 64-bit mode behaves differently depending
- // on if the second to last base was also zero sized.
- Size += OldSize % BasesAndFieldsAlignment.getQuantity();
- } else {
- if (Is64BitMode)
- Size += LastBaseWasEmpty ? CharUnits::One() : CharUnits::Zero();
- else
- Size = OldSize + BasesAndFieldsAlignment;
- }
- updateAlignment(PointerAlignment);
}
+}
- // Flush the lazy empty base.
- layoutNonVirtualBase(0);
+void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(
+ const CXXRecordDecl *BaseDecl,
+ const ASTRecordLayout &BaseLayout,
+ const ASTRecordLayout *&PreviousBaseLayout) {
+ // Insert padding between two bases if the first one is zero sized or
+ // contains a zero sized subobject and the second one is zero sized or leads
+ // with a zero sized base.
+ if (PreviousBaseLayout && PreviousBaseLayout->hasZeroSizedSubObject() &&
+ BaseLayout.leadsWithZeroSizedBase())
+ Size++;
+ ElementInfo Info = getAdjustedElementInfo(BaseLayout);
+ CharUnits BaseOffset = Size.RoundUpToAlignment(Info.Alignment);
+ Bases.insert(std::make_pair(BaseDecl, BaseOffset));
+ Size = BaseOffset + BaseLayout.getNonVirtualSize();
+ PreviousBaseLayout = &BaseLayout;
+ VBPtrOffset = Size;
}
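A short example of the padding rule implemented above; the offsets are what this algorithm produces for a 32-bit MSVC-compatible target (illustrative names):

  struct Empty1 {};
  struct Empty2 {};
  struct D : Empty1, Empty2 { int x; };
  // Empty1 is placed at offset 0. It has a zero sized subobject and Empty2
  // leads with one, so a byte of padding is inserted and Empty2 lands at
  // offset 1; the field x then goes at offset 4.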
void MicrosoftRecordLayoutBuilder::layoutFields(const RecordDecl *RD) {
LastFieldIsNonZeroWidthBitfield = false;
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end();
- Field != FieldEnd; ++Field)
- layoutField(*Field);
- Size = Size.RoundUpToAlignment(Alignment);
+ for (const auto *Field : RD->fields())
+ layoutField(Field);
}
void MicrosoftRecordLayoutBuilder::layoutField(const FieldDecl *FD) {
@@ -2415,20 +2458,14 @@
return;
}
LastFieldIsNonZeroWidthBitfield = false;
-
- std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD);
- CharUnits FieldSize = FieldInfo.first;
- CharUnits FieldAlign = FieldInfo.second;
-
- updateAlignment(FieldAlign);
+ ElementInfo Info = getAdjustedElementInfo(FD);
if (IsUnion) {
- placeFieldAtZero();
- Size = std::max(Size, FieldSize);
+ placeFieldAtOffset(CharUnits::Zero());
+ Size = std::max(Size, Info.Size);
} else {
- // Round up the current record size to the field's alignment boundary.
- CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign);
+ CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
placeFieldAtOffset(FieldOffset);
- Size = FieldOffset + FieldSize;
+ Size = FieldOffset + Info.Size;
}
}
@@ -2438,39 +2475,31 @@
layoutZeroWidthBitField(FD);
return;
}
-
- std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD);
- CharUnits FieldSize = FieldInfo.first;
- CharUnits FieldAlign = FieldInfo.second;
-
+ ElementInfo Info = getAdjustedElementInfo(FD);
// Clamp the bitfield to a containable size for the sake of being able
// to lay them out. Sema will throw an error.
- if (Width > Context.toBits(FieldSize))
- Width = Context.toBits(FieldSize);
-
+ if (Width > Context.toBits(Info.Size))
+ Width = Context.toBits(Info.Size);
// Check to see if this bitfield fits into an existing allocation. Note:
// MSVC refuses to pack bitfields of formal types with different sizes
// into the same allocation.
if (!IsUnion && LastFieldIsNonZeroWidthBitfield &&
- CurrentBitfieldSize == FieldSize && Width <= RemainingBitsInField) {
+ CurrentBitfieldSize == Info.Size && Width <= RemainingBitsInField) {
placeFieldAtBitOffset(Context.toBits(Size) - RemainingBitsInField);
RemainingBitsInField -= Width;
return;
}
-
LastFieldIsNonZeroWidthBitfield = true;
- CurrentBitfieldSize = FieldSize;
+ CurrentBitfieldSize = Info.Size;
if (IsUnion) {
- placeFieldAtZero();
- Size = std::max(Size, FieldSize);
- // TODO: Add a Sema warning that MS ignores bitfield alignment in unions.
+ placeFieldAtOffset(CharUnits::Zero());
+ Size = std::max(Size, Info.Size);
} else {
// Allocate a new block of memory and place the bitfield in it.
- CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign);
+ CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
placeFieldAtOffset(FieldOffset);
- Size = FieldOffset + FieldSize;
- updateAlignment(FieldAlign);
- RemainingBitsInField = Context.toBits(FieldSize) - Width;
+ Size = FieldOffset + Info.Size;
+ RemainingBitsInField = Context.toBits(Info.Size) - Width;
}
}
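A brief illustration of the allocation rule noted above (expected behaviour per this algorithm; hypothetical structs):

  struct S1 { char a : 4; char b : 4; };  // same formal type: a and b share one char unit
  struct S2 { char a : 4; int  b : 4; };  // different formal type: b starts a new int unit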
@@ -2478,120 +2507,139 @@
MicrosoftRecordLayoutBuilder::layoutZeroWidthBitField(const FieldDecl *FD) {
// Zero-width bitfields are ignored unless they follow a non-zero-width
// bitfield.
- std::pair<CharUnits, CharUnits> FieldInfo = getAdjustedFieldInfo(FD);
- CharUnits FieldSize = FieldInfo.first;
- CharUnits FieldAlign = FieldInfo.second;
-
if (!LastFieldIsNonZeroWidthBitfield) {
placeFieldAtOffset(IsUnion ? CharUnits::Zero() : Size);
// TODO: Add a Sema warning that MS ignores alignment for zero
- // sized bitfields that occur after zero-size bitfields or non bitfields.
+ // sized bitfields that occur after zero-size bitfields or non-bitfields.
return;
}
-
LastFieldIsNonZeroWidthBitfield = false;
+ ElementInfo Info = getAdjustedElementInfo(FD);
if (IsUnion) {
- placeFieldAtZero();
- Size = std::max(Size, FieldSize);
+ placeFieldAtOffset(CharUnits::Zero());
+ Size = std::max(Size, Info.Size);
} else {
// Round up the current record size to the field's alignment boundary.
- CharUnits FieldOffset = Size.RoundUpToAlignment(FieldAlign);
+ CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
placeFieldAtOffset(FieldOffset);
Size = FieldOffset;
- updateAlignment(FieldAlign);
}
}
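A sketch of the zero-width bitfield handling above, with offsets as this code computes them (hypothetical struct):

  struct Z { char a : 3; int : 0; char c; };
  // a occupies a char unit at offset 0; the zero-width int bitfield follows a
  // non-zero-width bitfield, so the size is rounded up to int alignment and c
  // is placed at offset 4.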
+void MicrosoftRecordLayoutBuilder::injectVBPtr(const CXXRecordDecl *RD) {
+ if (!HasVBPtr || SharedVBPtrBase)
+ return;
+ // Inject the VBPointer at the injection site.
+ CharUnits InjectionSite = VBPtrOffset;
+ // But before we do, make sure it's properly aligned.
+ VBPtrOffset = VBPtrOffset.RoundUpToAlignment(PointerInfo.Alignment);
+ // Determine where the first field should be laid out after the vbptr.
+ CharUnits FieldStart = VBPtrOffset + PointerInfo.Size;
+ // Make sure that the amount we push the fields back by is a multiple of the
+ // alignment.
+ CharUnits Offset = (FieldStart - InjectionSite).RoundUpToAlignment(
+ std::max(RequiredAlignment, Alignment));
+ // Increase the size of the object and push back all fields by the offset
+ // amount.
+ Size += Offset;
+ for (SmallVector<uint64_t, 16>::iterator i = FieldOffsets.begin(),
+ e = FieldOffsets.end();
+ i != e; ++i)
+ *i += Context.toBits(Offset);
+ for (BaseOffsetsMapTy::iterator i = Bases.begin(), e = Bases.end();
+ i != e; ++i)
+ if (i->second >= InjectionSite)
+ i->second += Offset;
+}
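A minimal example of the vbptr injection performed above, assuming a 32-bit MSVC-compatible target:

  struct A { int a; };
  struct B : virtual A { int b; };
  // The vbptr is injected at the recorded injection site (offset 0 here),
  // pushing b back to offset 4; the virtual base A is laid out later, at
  // offset 8, giving sizeof(B) == 12.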
+
+void MicrosoftRecordLayoutBuilder::injectVFPtr(const CXXRecordDecl *RD) {
+ if (!HasOwnVFPtr)
+ return;
+ // Make sure that the amount we push the struct back by is a multiple of the
+ // alignment.
+ CharUnits Offset = PointerInfo.Size.RoundUpToAlignment(
+ std::max(RequiredAlignment, Alignment));
+ // Increase the size of the object and push back all fields, the vbptr and all
+ // bases by the offset amount.
+ Size += Offset;
+ for (SmallVectorImpl<uint64_t>::iterator i = FieldOffsets.begin(),
+ e = FieldOffsets.end();
+ i != e; ++i)
+ *i += Context.toBits(Offset);
+ if (HasVBPtr)
+ VBPtrOffset += Offset;
+ for (BaseOffsetsMapTy::iterator i = Bases.begin(), e = Bases.end();
+ i != e; ++i)
+ i->second += Offset;
+}
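The analogous vfptr case, again as a hedged 32-bit sketch:

  struct C { virtual void f(); int c; };
  // C has no primary base but declares a new virtual function, so HasOwnVFPtr
  // is set and everything is pushed back by one pointer: vfptr at 0, c at 4.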
+
void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) {
if (!HasVBPtr)
return;
-
- updateAlignment(VirtualAlignment);
-
- // Zero-sized v-bases obey the alignment attribute so apply it here. The
- // alignment attribute is normally accounted for in FinalizeLayout.
- if (unsigned MaxAlign = RD->getMaxAlignment())
- updateAlignment(Context.toCharUnitsFromBits(MaxAlign));
-
- llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordisp =
+ // Vtordisps are always 4 bytes (even in 64-bit mode).
+ CharUnits VtorDispSize = CharUnits::fromQuantity(4);
+ CharUnits VtorDispAlignment = VtorDispSize;
+ // vtordisps respect pragma pack.
+ if (!MaxFieldAlignment.isZero())
+ VtorDispAlignment = std::min(VtorDispAlignment, MaxFieldAlignment);
+ // The alignment of the vtordisp is at least the required alignment of the
+ // entire record. This requirement may be present to support vtordisp
+ // injection.
+ for (const auto &I : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+ RequiredAlignment =
+ std::max(RequiredAlignment, BaseLayout.getRequiredAlignment());
+ }
+ VtorDispAlignment = std::max(VtorDispAlignment, RequiredAlignment);
+ // Compute the vtordisp set.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordispSet =
computeVtorDispSet(RD);
-
- // If the last field we laid out was a non-zero length bitfield then add some
- // extra padding for no obvious reason.
- if (LastFieldIsNonZeroWidthBitfield)
- Size += CurrentBitfieldSize;
-
// Iterate through the virtual bases and lay them out.
- for (CXXRecordDecl::base_class_const_iterator i = RD->vbases_begin(),
- e = RD->vbases_end();
- i != e; ++i) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(i->getType()->castAs<RecordType>()->getDecl());
- layoutVirtualBase(BaseDecl, HasVtordisp.count(BaseDecl));
+ const ASTRecordLayout* PreviousBaseLayout = 0;
+ for (const auto &I : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
+ bool HasVtordisp = HasVtordispSet.count(BaseDecl);
+ // If the last field we laid out was a non-zero length bitfield then add
+ // some extra padding for no obvious reason.
+ if (LastFieldIsNonZeroWidthBitfield)
+ Size += CurrentBitfieldSize;
+ // Insert padding between two bases if the first one is zero sized or
+ // contains a zero sized subobject and the second one is zero sized or leads
+ // with a zero sized base. The padding between virtual bases is 4
+ // bytes (in both 32- and 64-bit modes) and always involves rounding up to
+ // the required alignment; we don't know why.
+ if (PreviousBaseLayout && PreviousBaseLayout->hasZeroSizedSubObject() &&
+ BaseLayout.leadsWithZeroSizedBase())
+ Size = Size.RoundUpToAlignment(VtorDispAlignment) + VtorDispSize;
+ // Insert the vtordisp.
+ if (HasVtordisp)
+ Size = Size.RoundUpToAlignment(VtorDispAlignment) + VtorDispSize;
+ // Insert the virtual base.
+ HasZeroSizedSubObject = false;
+ ElementInfo Info = getAdjustedElementInfo(BaseLayout);
+ CharUnits BaseOffset = Size.RoundUpToAlignment(Info.Alignment);
+ VBases.insert(std::make_pair(BaseDecl,
+ ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp)));
+ Size = BaseOffset + BaseLayout.getNonVirtualSize();
+ PreviousBaseLayout = &BaseLayout;
}
}
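A sketch of where a vtordisp ends up under the rules above (assuming the default /vd1 mode; illustrative classes):

  struct A { virtual void f(); int a; };
  struct B : virtual A {
    B();                // user-declared constructor
    void f() override;  // overrides a virtual defined in the virtual base
    int b;
  };
  // A is placed in the vtordisp set, so 4 bytes (rounded up to the vtordisp
  // alignment) are reserved immediately before the virtual base A in B.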
-void MicrosoftRecordLayoutBuilder::layoutVirtualBase(const CXXRecordDecl *RD,
- bool HasVtordisp) {
- if (LazyEmptyBase) {
- const ASTRecordLayout &LazyLayout =
- Context.getASTRecordLayout(LazyEmptyBase);
- Size = Size.RoundUpToAlignment(LazyLayout.getAlignment());
- VBases.insert(
- std::make_pair(LazyEmptyBase, ASTRecordLayout::VBaseInfo(Size, false)));
- // Empty bases only consume space when followed by another empty base.
- // The space consumed is in an Alignment sized/aligned block and the v-base
- // is placed at its alignment offset into the chunk, unless its alignment
- // is less than 4 bytes, at which it is placed at 4 byte offset in the
- // chunk. We have no idea why.
- if (RD && Context.getASTRecordLayout(RD).getNonVirtualSize().isZero())
- Size = Size.RoundUpToAlignment(Alignment) + CharUnits::fromQuantity(4);
- LazyEmptyBase = 0;
- }
-
- // RD is null when flushing the final lazy virtual base.
- if (!RD)
- return;
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- if (Layout.getNonVirtualSize().isZero() && !HasVtordisp) {
- LazyEmptyBase = RD;
- return;
- }
-
- CharUnits BaseNVSize = Layout.getNonVirtualSize();
- CharUnits BaseAlign = Layout.getAlignment();
-
- // vtordisps are always 4 bytes (even in 64-bit mode)
- if (HasVtordisp)
- Size = Size.RoundUpToAlignment(Alignment) + CharUnits::fromQuantity(4);
- Size = Size.RoundUpToAlignment(BaseAlign);
-
- // Insert the base here.
- CharUnits BaseOffset = Size.RoundUpToAlignment(BaseAlign);
- VBases.insert(
- std::make_pair(RD, ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp)));
- Size = BaseOffset + BaseNVSize;
- // Note: we don't update alignment here because it was accounted for in
- // InitializeLayout.
-}
-
-void MicrosoftRecordLayoutBuilder::finalizeCXXLayout(const CXXRecordDecl *RD) {
- // Flush the lazy virtual base.
- layoutVirtualBase(0, false);
-
- if (RD->vbases_begin() == RD->vbases_end() || AlignAfterVBases)
+void MicrosoftRecordLayoutBuilder::finalizeLayout(const RecordDecl *RD) {
+ // Respect required alignment. Note that in 32-bit mode the required
+ // alignment may be 0 and cause the size not to be updated.
+ DataSize = Size;
+ if (!RequiredAlignment.isZero()) {
+ Alignment = std::max(Alignment, RequiredAlignment);
Size = Size.RoundUpToAlignment(Alignment);
-
- if (Size.isZero())
+ }
+ // Zero-sized structures have size equal to their alignment.
+ if (Size.isZero()) {
+ HasZeroSizedSubObject = true;
+ LeadsWithZeroSizedBase = true;
Size = Alignment;
-}
-
-void MicrosoftRecordLayoutBuilder::honorDeclspecAlign(const RecordDecl *RD) {
- if (unsigned MaxAlign = RD->getMaxAlignment()) {
- AlignAfterVBases = true;
- updateAlignment(Context.toCharUnitsFromBits(MaxAlign));
- Size = Size.RoundUpToAlignment(Alignment);
}
}
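Per the final rule above, a zero-sized record still occupies storage equal to its alignment; a hedged example using MSVC's alignment attribute:

  struct Empty {};                        // Size would be 0, so it becomes 1
  struct __declspec(align(8)) Empty8 {};  // alignment is 8, so sizeof becomes 8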
@@ -2602,37 +2650,53 @@
return true;
// If any of a virtual base's non-virtual bases (recursively) requires a
// vtordisp then so does this virtual base.
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end();
- i != e; ++i)
- if (!i->isVirtual() &&
+ for (const auto &I : RD->bases())
+ if (!I.isVirtual() &&
RequiresVtordisp(
HasVtordisp,
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl())))
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl())))
return true;
return false;
}
llvm::SmallPtrSet<const CXXRecordDecl *, 2>
MicrosoftRecordLayoutBuilder::computeVtorDispSet(const CXXRecordDecl *RD) {
- llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordisp;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtordispSet;
+
+ // /vd0 or #pragma vtordisp(0): Never use vtordisps when used as a vbase.
+ if (RD->getMSVtorDispMode() == MSVtorDispAttr::Never)
+ return HasVtordispSet;
+
+ // /vd2 or #pragma vtordisp(2): Always use vtordisps for virtual bases with
+ // vftables.
+ if (RD->getMSVtorDispMode() == MSVtorDispAttr::ForVFTable) {
+ for (const auto &I : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
+ if (Layout.hasExtendableVFPtr())
+ HasVtordispSet.insert(BaseDecl);
+ }
+ return HasVtordispSet;
+ }
+
+ // /vd1 or #pragma vtordisp(1): Try to guess based on whether we think it's
+ // possible for a partially constructed object with virtual base overrides to
+ // escape a non-trivial constructor.
+ assert(RD->getMSVtorDispMode() == MSVtorDispAttr::ForVBaseOverride);
// If any of our bases need a vtordisp for this type, so do we. Check our
// direct bases for vtordisp requirements.
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end();
- i != e; ++i) {
+ for (const auto &I : RD->bases()) {
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
for (ASTRecordLayout::VBaseOffsetsMapTy::const_iterator
bi = Layout.getVBaseOffsetsMap().begin(),
be = Layout.getVBaseOffsetsMap().end();
bi != be; ++bi)
if (bi->second.hasVtorDisp())
- HasVtordisp.insert(bi->first);
+ HasVtordispSet.insert(bi->first);
}
-
// If we define a constructor or destructor and override a function that is
// defined in a virtual base's vtable, that virtual base needs a vtordisp.
// Here we collect a list of classes with vtables for which our virtual bases
@@ -2643,36 +2707,31 @@
if (RD->hasUserDeclaredConstructor() || RD->hasUserDeclaredDestructor()) {
llvm::SmallPtrSet<const CXXMethodDecl *, 8> Work;
// Seed the working set with our non-destructor virtual methods.
- for (CXXRecordDecl::method_iterator i = RD->method_begin(),
- e = RD->method_end();
- i != e; ++i)
- if ((*i)->isVirtual() && !isa<CXXDestructorDecl>(*i))
- Work.insert(*i);
+ for (const auto *I : RD->methods())
+ if (I->isVirtual() && !isa<CXXDestructorDecl>(I))
+ Work.insert(I);
while (!Work.empty()) {
const CXXMethodDecl *MD = *Work.begin();
CXXMethodDecl::method_iterator i = MD->begin_overridden_methods(),
e = MD->end_overridden_methods();
if (i == e)
// If a virtual method has no overrides it lives in its parent's vtable.
- HasVtordisp.insert(MD->getParent());
+ HasVtordispSet.insert(MD->getParent());
else
Work.insert(i, e);
// We've finished processing this element, remove it from the working set.
Work.erase(MD);
}
}
-
// Re-check all of our vbases for vtordisp requirements (in case their
// non-virtual bases have vtordisp requirements).
- for (CXXRecordDecl::base_class_const_iterator i = RD->vbases_begin(),
- e = RD->vbases_end();
- i != e; ++i) {
- const CXXRecordDecl *BaseDecl = i->getType()->getAsCXXRecordDecl();
- if (!HasVtordisp.count(BaseDecl) && RequiresVtordisp(HasVtordisp, BaseDecl))
- HasVtordisp.insert(BaseDecl);
+ for (const auto &I : RD->vbases()) {
+ const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();
+ if (!HasVtordispSet.count(BaseDecl) &&
+ RequiresVtordisp(HasVtordispSet, BaseDecl))
+ HasVtordispSet.insert(BaseDecl);
}
-
- return HasVtordisp;
+ return HasVtordispSet;
}
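The three modes handled above correspond to MSVC's vtordisp controls; a usage sketch (Base is a placeholder for some dynamic class):

  #pragma vtordisp(0)  // MSVtorDispAttr::Never: no vtordisps for virtual bases
  struct NoDisp : virtual Base { /* ... */ };
  #pragma vtordisp(2)  // MSVtorDispAttr::ForVFTable: vtordisps for every vbase with a vftable
  struct AllDisp : virtual Base { /* ... */ };
  #pragma vtordisp(1)  // MSVtorDispAttr::ForVBaseOverride: the default heuristic above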
/// \brief Get or compute information about the layout of the specified record
@@ -2684,19 +2743,20 @@
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
Builder.cxxLayout(RD);
return new (*this) ASTRecordLayout(
- *this, Builder.Size, Builder.Alignment,
- Builder.HasExtendableVFPtr && !Builder.PrimaryBase,
- Builder.HasExtendableVFPtr,
- Builder.VBPtrOffset, Builder.DataSize, Builder.FieldOffsets.data(),
- Builder.FieldOffsets.size(), Builder.DataSize,
- Builder.NonVirtualAlignment, CharUnits::Zero(), Builder.PrimaryBase,
- false, Builder.SharedVBPtrBase, Builder.AlignAfterVBases, Builder.Bases,
- Builder.VBases);
+ *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
+ Builder.HasOwnVFPtr,
+ Builder.HasOwnVFPtr || Builder.PrimaryBase,
+ Builder.VBPtrOffset, Builder.NonVirtualSize, Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size(), Builder.NonVirtualSize,
+ Builder.Alignment, CharUnits::Zero(), Builder.PrimaryBase,
+ false, Builder.SharedVBPtrBase,
+ Builder.HasZeroSizedSubObject, Builder.LeadsWithZeroSizedBase,
+ Builder.Bases, Builder.VBases);
} else {
Builder.layout(D);
return new (*this) ASTRecordLayout(
- *this, Builder.Size, Builder.Alignment, Builder.Size,
- Builder.FieldOffsets.data(), Builder.FieldOffsets.size());
+ *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
+ Builder.Size, Builder.FieldOffsets.data(), Builder.FieldOffsets.size());
}
}
@@ -2747,6 +2807,8 @@
NewEntry =
new (*this) ASTRecordLayout(*this, Builder.getSize(),
Builder.Alignment,
+ /*RequiredAlignment : used by MS-ABI*/
+ Builder.Alignment,
Builder.HasOwnVFPtr,
RD->isDynamicClass(),
CharUnits::fromQuantity(-1),
@@ -2758,7 +2820,7 @@
EmptySubobjects.SizeOfLargestEmptySubobject,
Builder.PrimaryBase,
Builder.PrimaryBaseIsVirtual,
- 0, true,
+ 0, false, false,
Builder.Bases, Builder.VBases);
} else {
RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
@@ -2767,6 +2829,8 @@
NewEntry =
new (*this) ASTRecordLayout(*this, Builder.getSize(),
Builder.Alignment,
+ /*RequiredAlignment : used by MS-ABI*/
+ Builder.Alignment,
Builder.getSize(),
Builder.FieldOffsets.data(),
Builder.FieldOffsets.size());
@@ -2830,10 +2894,8 @@
const IndirectFieldDecl *IFD = cast<IndirectFieldDecl>(VD);
OffsetInBits = 0;
- for (IndirectFieldDecl::chain_iterator CI = IFD->chain_begin(),
- CE = IFD->chain_end();
- CI != CE; ++CI)
- OffsetInBits += ::getFieldOffset(*this, cast<FieldDecl>(*CI));
+ for (const auto *CI : IFD->chain())
+ OffsetInBits += ::getFieldOffset(*this, cast<FieldDecl>(CI));
}
return OffsetInBits;
@@ -2876,6 +2938,8 @@
const ASTRecordLayout *NewEntry =
new (*this) ASTRecordLayout(*this, Builder.getSize(),
Builder.Alignment,
+ /*RequiredAlignment : used by MS-ABI*/
+ Builder.Alignment,
Builder.getDataSize(),
Builder.FieldOffsets.data(),
Builder.FieldOffsets.size());
@@ -2928,19 +2992,27 @@
OS << '(' << *RD << " vftable pointer)\n";
}
- // Dump (non-virtual) bases
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
+ // Collect nvbases.
+ SmallVector<const CXXRecordDecl *, 4> Bases;
+ for (const auto &I : RD->bases()) {
+ assert(!I.getType()->isDependentType() &&
"Cannot layout class with dependent bases.");
- if (I->isVirtual())
- continue;
+ if (!I.isVirtual())
+ Bases.push_back(I.getType()->getAsCXXRecordDecl());
+ }
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ // Sort nvbases by offset.
+ std::stable_sort(Bases.begin(), Bases.end(),
+ [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
+ return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
+ });
+ // Dump (non-virtual) bases
+ for (SmallVectorImpl<const CXXRecordDecl *>::iterator I = Bases.begin(),
+ E = Bases.end();
+ I != E; ++I) {
+ const CXXRecordDecl *Base = *I;
CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base);
-
DumpCXXRecordLayout(OS, Base, C, BaseOffset, IndentLevel,
Base == PrimaryBase ? "(primary base)" : "(base)",
/*IncludeVirtualBases=*/false);
@@ -2979,11 +3051,10 @@
// Dump virtual bases.
const ASTRecordLayout::VBaseOffsetsMapTy &vtordisps =
Layout.getVBaseOffsetsMap();
- for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
- E = RD->vbases_end(); I != E; ++I) {
- assert(I->isVirtual() && "Found non-virtual class!");
+ for (const auto &I : RD->vbases()) {
+ assert(I.isVirtual() && "Found non-virtual class!");
const CXXRecordDecl *VBase =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase);
@@ -3006,7 +3077,7 @@
PrintIndentNoOffset(OS, IndentLevel - 1);
OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity();
- OS << ", nvalign=" << Layout.getNonVirtualAlign().getQuantity() << "]\n";
+ OS << ", nvalign=" << Layout.getNonVirtualAlignment().getQuantity() << "]\n";
OS << '\n';
}
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index de85161..a03ef00 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -803,7 +803,7 @@
Expr *CXXForRangeStmt::getRangeInit() {
DeclStmt *RangeStmt = getRangeStmt();
VarDecl *RangeDecl = dyn_cast_or_null<VarDecl>(RangeStmt->getSingleDecl());
- assert(RangeDecl &&& "for-range should have a single var decl");
+ assert(RangeDecl && "for-range should have a single var decl");
return RangeDecl->getInit();
}
@@ -1100,15 +1100,14 @@
}
bool CapturedStmt::capturesVariable(const VarDecl *Var) const {
- for (const_capture_iterator I = capture_begin(),
- E = capture_end(); I != E; ++I) {
- if (!I->capturesVariable())
+ for (const auto &I : captures()) {
+ if (!I.capturesVariable())
continue;
// This does not handle variable redeclarations. This should be
// extended to capture variables with redeclarations, for example
// a thread-private variable in OpenMP.
- if (I->getCapturedVar() == Var)
+ if (I.getCapturedVar() == Var)
return true;
}
@@ -1130,8 +1129,9 @@
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(sizeof(OMPPrivateClause) + sizeof(Expr *) * VL.size(),
- llvm::alignOf<OMPPrivateClause>());
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * VL.size());
OMPPrivateClause *Clause = new (Mem) OMPPrivateClause(StartLoc, LParenLoc,
EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -1140,8 +1140,9 @@
OMPPrivateClause *OMPPrivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
- void *Mem = C.Allocate(sizeof(OMPPrivateClause) + sizeof(Expr *) * N,
- llvm::alignOf<OMPPrivateClause>());
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * N);
return new (Mem) OMPPrivateClause(N);
}
@@ -1150,9 +1151,9 @@
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(sizeof(OMPFirstprivateClause) +
- sizeof(Expr *) * VL.size(),
- llvm::alignOf<OMPFirstprivateClause>());
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * VL.size());
OMPFirstprivateClause *Clause = new (Mem) OMPFirstprivateClause(StartLoc,
LParenLoc,
EndLoc,
@@ -1163,8 +1164,9 @@
OMPFirstprivateClause *OMPFirstprivateClause::CreateEmpty(const ASTContext &C,
unsigned N) {
- void *Mem = C.Allocate(sizeof(OMPFirstprivateClause) + sizeof(Expr *) * N,
- llvm::alignOf<OMPFirstprivateClause>());
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * N);
return new (Mem) OMPFirstprivateClause(N);
}
@@ -1173,8 +1175,9 @@
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(sizeof(OMPSharedClause) + sizeof(Expr *) * VL.size(),
- llvm::alignOf<OMPSharedClause>());
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * VL.size());
OMPSharedClause *Clause = new (Mem) OMPSharedClause(StartLoc, LParenLoc,
EndLoc, VL.size());
Clause->setVarRefs(VL);
@@ -1183,15 +1186,38 @@
OMPSharedClause *OMPSharedClause::CreateEmpty(const ASTContext &C,
unsigned N) {
- void *Mem = C.Allocate(sizeof(OMPSharedClause) + sizeof(Expr *) * N,
- llvm::alignOf<OMPSharedClause>());
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * N);
return new (Mem) OMPSharedClause(N);
}
+OMPCopyinClause *OMPCopyinClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> VL) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * VL.size());
+ OMPCopyinClause *Clause = new (Mem) OMPCopyinClause(StartLoc, LParenLoc,
+ EndLoc, VL.size());
+ Clause->setVarRefs(VL);
+ return Clause;
+}
+
+OMPCopyinClause *OMPCopyinClause::CreateEmpty(const ASTContext &C,
+ unsigned N) {
+ void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
+ llvm::alignOf<Expr *>()) +
+ sizeof(Expr *) * N);
+ return new (Mem) OMPCopyinClause(N);
+}
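The allocation pattern used by these Create/CreateEmpty methods, sketched generically (Header, Elem, and N are placeholders, not real Clang names): the object size is rounded up to the trailing element's alignment so the array appended after the object is correctly aligned.

  void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(Header),
                                                  llvm::alignOf<Elem>()) +
                         sizeof(Elem) * N);
  Header *H = new (Mem) Header(/*...*/);  // trailing Elem array starts right after H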
+
void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
- assert(Clauses.size() == this->Clauses.size() &&
+ assert(Clauses.size() == getNumClauses() &&
"Number of clauses is not the same as the preallocated buffer");
- std::copy(Clauses.begin(), Clauses.end(), this->Clauses.begin());
+ std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
}
OMPParallelDirective *OMPParallelDirective::Create(
@@ -1200,9 +1226,10 @@
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
- void *Mem = C.Allocate(sizeof(OMPParallelDirective) +
- sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *),
- llvm::alignOf<OMPParallelDirective>());
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *));
OMPParallelDirective *Dir = new (Mem) OMPParallelDirective(StartLoc, EndLoc,
Clauses.size());
Dir->setClauses(Clauses);
@@ -1211,10 +1238,39 @@
}
OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C,
- unsigned N,
+ unsigned NumClauses,
EmptyShell) {
- void *Mem = C.Allocate(sizeof(OMPParallelDirective) +
- sizeof(OMPClause *) * N + sizeof(Stmt *),
- llvm::alignOf<OMPParallelDirective>());
- return new (Mem) OMPParallelDirective(N);
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPParallelDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *));
+ return new (Mem) OMPParallelDirective(NumClauses);
}
+
+OMPSimdDirective *OMPSimdDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *));
+ OMPSimdDirective *Dir = new (Mem) OMPSimdDirective(StartLoc, EndLoc,
+ 1, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPSimdDirective),
+ llvm::alignOf<OMPClause *>());
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *));
+ return new (Mem) OMPSimdDirective(CollapsedNum, NumClauses);
+}
+
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 0ecb5b5..7ad5491 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -70,6 +70,7 @@
void PrintCallArgs(CallExpr *E);
void PrintRawSEHExceptHandler(SEHExceptStmt *S);
void PrintRawSEHFinallyStmt(SEHFinallyStmt *S);
+ void PrintOMPExecutableDirective(OMPExecutableDirective *S);
void PrintExpr(Expr *E) {
if (E)
@@ -89,7 +90,7 @@
return;
else StmtVisitor<StmtPrinter>::Visit(S);
}
-
+
void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
Indent() << "<<unknown stmt type>>\n";
}
@@ -113,9 +114,8 @@
/// with no newline after the }.
void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) {
OS << "{\n";
- for (CompoundStmt::body_iterator I = Node->body_begin(), E = Node->body_end();
- I != E; ++I)
- PrintStmt(*I);
+ for (auto *I : Node->body())
+ PrintStmt(I);
Indent() << "}";
}
@@ -125,11 +125,7 @@
}
void StmtPrinter::PrintRawDeclStmt(const DeclStmt *S) {
- DeclStmt::const_decl_iterator Begin = S->decl_begin(), End = S->decl_end();
- SmallVector<Decl*, 2> Decls;
- for ( ; Begin != End; ++Begin)
- Decls.push_back(*Begin);
-
+ SmallVector<Decl*, 2> Decls(S->decls());
Decl::printGroup(Decls.data(), Decls.size(), OS, Policy, IndentLevel);
}
@@ -330,7 +326,8 @@
PrintExpr(Node->getRangeInit());
OS << ") {\n";
PrintStmt(Node->getBody());
- Indent() << "}\n";
+ Indent() << "}";
+ if (Policy.IncludeNewlines) OS << "\n";
}
void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
@@ -350,21 +347,25 @@
}
void StmtPrinter::VisitGotoStmt(GotoStmt *Node) {
- Indent() << "goto " << Node->getLabel()->getName() << ";\n";
+ Indent() << "goto " << Node->getLabel()->getName() << ";";
+ if (Policy.IncludeNewlines) OS << "\n";
}
void StmtPrinter::VisitIndirectGotoStmt(IndirectGotoStmt *Node) {
Indent() << "goto *";
PrintExpr(Node->getTarget());
- OS << ";\n";
+ OS << ";";
+ if (Policy.IncludeNewlines) OS << "\n";
}
void StmtPrinter::VisitContinueStmt(ContinueStmt *Node) {
- Indent() << "continue;\n";
+ Indent() << "continue;";
+ if (Policy.IncludeNewlines) OS << "\n";
}
void StmtPrinter::VisitBreakStmt(BreakStmt *Node) {
- Indent() << "break;\n";
+ Indent() << "break;";
+ if (Policy.IncludeNewlines) OS << "\n";
}
@@ -374,7 +375,8 @@
OS << " ";
PrintExpr(Node->getRetValue());
}
- OS << ";\n";
+ OS << ";";
+ if (Policy.IncludeNewlines) OS << "\n";
}
@@ -437,7 +439,8 @@
VisitStringLiteral(Node->getClobberStringLiteral(i));
}
- OS << ");\n";
+ OS << ");";
+ if (Policy.IncludeNewlines) OS << "\n";
}
void StmtPrinter::VisitMSAsmStmt(MSAsmStmt *Node) {
@@ -586,16 +589,36 @@
namespace {
class OMPClausePrinter : public OMPClauseVisitor<OMPClausePrinter> {
raw_ostream &OS;
+ const PrintingPolicy &Policy;
/// \brief Process clauses with list of variables.
template <typename T>
void VisitOMPClauseList(T *Node, char StartSym);
public:
- OMPClausePrinter(raw_ostream &OS) : OS(OS) { }
+ OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
+ : OS(OS), Policy(Policy) { }
#define OPENMP_CLAUSE(Name, Class) \
void Visit##Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
};
+void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
+ OS << "if(";
+ Node->getCondition()->printPretty(OS, 0, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumThreadsClause(OMPNumThreadsClause *Node) {
+ OS << "num_threads(";
+ Node->getNumThreads()->printPretty(OS, 0, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) {
+ OS << "safelen(";
+ Node->getSafelen()->printPretty(OS, 0, Policy, 0);
+ OS << ")";
+}
+
void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
OS << "default("
<< getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind())
@@ -606,9 +629,15 @@
void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
for (typename T::varlist_iterator I = Node->varlist_begin(),
E = Node->varlist_end();
- I != E; ++I)
- OS << (I == Node->varlist_begin() ? StartSym : ',')
- << *cast<NamedDecl>(cast<DeclRefExpr>(*I)->getDecl());
+ I != E; ++I) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(*I)) {
+ OS << (I == Node->varlist_begin() ? StartSym : ',');
+ cast<NamedDecl>(DRE->getDecl())->printQualifiedName(OS);
+ } else {
+ OS << (I == Node->varlist_begin() ? StartSym : ',');
+ (*I)->printPretty(OS, 0, Policy, 0);
+ }
+ }
}
void OMPClausePrinter::VisitOMPPrivateClause(OMPPrivateClause *Node) {
@@ -635,17 +664,23 @@
}
}
+void OMPClausePrinter::VisitOMPCopyinClause(OMPCopyinClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "copyin";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
}
//===----------------------------------------------------------------------===//
// OpenMP directives printing methods
//===----------------------------------------------------------------------===//
-void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) {
- Indent() << "#pragma omp parallel ";
-
- OMPClausePrinter Printer(OS);
- ArrayRef<OMPClause *> Clauses = Node->clauses();
+void StmtPrinter::PrintOMPExecutableDirective(OMPExecutableDirective *S) {
+ OMPClausePrinter Printer(OS, Policy);
+ ArrayRef<OMPClause *> Clauses = S->clauses();
for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end();
I != E; ++I)
if (*I && !(*I)->isImplicit()) {
@@ -653,13 +688,24 @@
OS << ' ';
}
OS << "\n";
- if (Node->getAssociatedStmt()) {
- assert(isa<CapturedStmt>(Node->getAssociatedStmt()) &&
+ if (S->getAssociatedStmt()) {
+ assert(isa<CapturedStmt>(S->getAssociatedStmt()) &&
"Expected captured statement!");
- Stmt *CS = cast<CapturedStmt>(Node->getAssociatedStmt())->getCapturedStmt();
+ Stmt *CS = cast<CapturedStmt>(S->getAssociatedStmt())->getCapturedStmt();
PrintStmt(CS);
}
}
+
+void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) {
+ Indent() << "#pragma omp parallel ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPSimdDirective(OMPSimdDirective *Node) {
+ Indent() << "#pragma omp simd ";
+ PrintOMPExecutableDirective(Node);
+}
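For reference, the kind of source the new simd printer reproduces (an illustrative snippet, not taken from the patch):

  #pragma omp simd safelen(4)
  for (int i = 0; i < n; ++i)
    a[i] = b[i] + c[i];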
+
//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
@@ -709,13 +755,15 @@
void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
if (Node->isSuperReceiver())
OS << "super.";
- else if (Node->getBase()) {
+ else if (Node->isObjectReceiver() && Node->getBase()) {
PrintExpr(Node->getBase());
OS << ".";
+ } else if (Node->isClassReceiver() && Node->getClassReceiver()) {
+ OS << Node->getClassReceiver()->getName() << ".";
}
if (Node->isImplicitProperty())
- OS << Node->getImplicitPropertyGetter()->getSelector().getAsString();
+ Node->getImplicitPropertyGetter()->getSelector().print(OS);
else
OS << Node->getExplicitProperty()->getName();
}
@@ -1041,7 +1089,7 @@
PrintExpr(Node->getInitializer());
}
void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
- // No need to print anything, simply forward to the sub expression.
+ // No need to print anything, simply forward to the subexpression.
PrintExpr(Node->getSubExpr());
}
void StmtPrinter::VisitBinaryOperator(BinaryOperator *Node) {
@@ -1279,6 +1327,12 @@
}
void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
+ // If we have a conversion operator call only print the argument.
+ CXXMethodDecl *MD = Node->getMethodDecl();
+ if (MD && isa<CXXConversionDecl>(MD)) {
+ PrintExpr(Node->getImplicitObjectArgument());
+ return;
+ }
VisitCallExpr(cast<CallExpr>(Node));
}
@@ -1490,16 +1544,14 @@
OS << " (";
CXXMethodDecl *Method = Node->getCallOperator();
NeedComma = false;
- for (CXXMethodDecl::param_iterator P = Method->param_begin(),
- PEnd = Method->param_end();
- P != PEnd; ++P) {
+ for (auto P : Method->params()) {
if (NeedComma) {
OS << ", ";
} else {
NeedComma = true;
}
- std::string ParamStr = (*P)->getNameAsString();
- (*P)->getOriginalType().print(OS, Policy, ParamStr);
+ std::string ParamStr = P->getNameAsString();
+ P->getOriginalType().print(OS, Policy, ParamStr);
}
if (Method->isVariadic()) {
if (NeedComma)
@@ -1520,7 +1572,7 @@
// Print the trailing return type if it was specified in the source.
if (Node->hasExplicitResultType()) {
OS << " -> ";
- Proto->getResultType().print(OS, Policy);
+ Proto->getReturnType().print(OS, Policy);
}
}
@@ -1625,7 +1677,7 @@
}
void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) {
- // Just forward to the sub expression.
+ // Just forward to the subexpression.
PrintExpr(E->getSubExpr());
}
@@ -1675,74 +1727,15 @@
OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
}
-static const char *getTypeTraitName(UnaryTypeTrait UTT) {
- switch (UTT) {
- case UTT_HasNothrowAssign: return "__has_nothrow_assign";
- case UTT_HasNothrowMoveAssign: return "__has_nothrow_move_assign";
- case UTT_HasNothrowConstructor: return "__has_nothrow_constructor";
- case UTT_HasNothrowCopy: return "__has_nothrow_copy";
- case UTT_HasTrivialAssign: return "__has_trivial_assign";
- case UTT_HasTrivialMoveAssign: return "__has_trivial_move_assign";
- case UTT_HasTrivialMoveConstructor: return "__has_trivial_move_constructor";
- case UTT_HasTrivialDefaultConstructor: return "__has_trivial_constructor";
- case UTT_HasTrivialCopy: return "__has_trivial_copy";
- case UTT_HasTrivialDestructor: return "__has_trivial_destructor";
- case UTT_HasVirtualDestructor: return "__has_virtual_destructor";
- case UTT_IsAbstract: return "__is_abstract";
- case UTT_IsArithmetic: return "__is_arithmetic";
- case UTT_IsArray: return "__is_array";
- case UTT_IsClass: return "__is_class";
- case UTT_IsCompleteType: return "__is_complete_type";
- case UTT_IsCompound: return "__is_compound";
- case UTT_IsConst: return "__is_const";
- case UTT_IsEmpty: return "__is_empty";
- case UTT_IsEnum: return "__is_enum";
- case UTT_IsFinal: return "__is_final";
- case UTT_IsFloatingPoint: return "__is_floating_point";
- case UTT_IsFunction: return "__is_function";
- case UTT_IsFundamental: return "__is_fundamental";
- case UTT_IsIntegral: return "__is_integral";
- case UTT_IsInterfaceClass: return "__is_interface_class";
- case UTT_IsLiteral: return "__is_literal";
- case UTT_IsLvalueReference: return "__is_lvalue_reference";
- case UTT_IsMemberFunctionPointer: return "__is_member_function_pointer";
- case UTT_IsMemberObjectPointer: return "__is_member_object_pointer";
- case UTT_IsMemberPointer: return "__is_member_pointer";
- case UTT_IsObject: return "__is_object";
- case UTT_IsPOD: return "__is_pod";
- case UTT_IsPointer: return "__is_pointer";
- case UTT_IsPolymorphic: return "__is_polymorphic";
- case UTT_IsReference: return "__is_reference";
- case UTT_IsRvalueReference: return "__is_rvalue_reference";
- case UTT_IsScalar: return "__is_scalar";
- case UTT_IsSealed: return "__is_sealed";
- case UTT_IsSigned: return "__is_signed";
- case UTT_IsStandardLayout: return "__is_standard_layout";
- case UTT_IsTrivial: return "__is_trivial";
- case UTT_IsTriviallyCopyable: return "__is_trivially_copyable";
- case UTT_IsUnion: return "__is_union";
- case UTT_IsUnsigned: return "__is_unsigned";
- case UTT_IsVoid: return "__is_void";
- case UTT_IsVolatile: return "__is_volatile";
- }
- llvm_unreachable("Type trait not covered by switch statement");
-}
-
-static const char *getTypeTraitName(BinaryTypeTrait BTT) {
- switch (BTT) {
- case BTT_IsBaseOf: return "__is_base_of";
- case BTT_IsConvertible: return "__is_convertible";
- case BTT_IsSame: return "__is_same";
- case BTT_TypeCompatible: return "__builtin_types_compatible_p";
- case BTT_IsConvertibleTo: return "__is_convertible_to";
- case BTT_IsTriviallyAssignable: return "__is_trivially_assignable";
- }
- llvm_unreachable("Binary type trait not covered by switch");
-}
-
static const char *getTypeTraitName(TypeTrait TT) {
switch (TT) {
- case clang::TT_IsTriviallyConstructible:return "__is_trivially_constructible";
+#define TYPE_TRAIT_1(Spelling, Name, Key) \
+case clang::UTT_##Name: return #Spelling;
+#define TYPE_TRAIT_2(Spelling, Name, Key) \
+case clang::BTT_##Name: return #Spelling;
+#define TYPE_TRAIT_N(Spelling, Name, Key) \
+ case clang::TT_##Name: return #Spelling;
+#include "clang/Basic/TokenKinds.def"
}
llvm_unreachable("Type trait not covered by switch");
}
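For clarity, each TokenKinds.def entry expands through the TYPE_TRAIT_n macros above into one case, roughly like this (a hedged paraphrase of two entries):

  // TYPE_TRAIT_1(__is_enum, IsEnum, KEYCXX)      -> case clang::UTT_IsEnum:   return "__is_enum";
  // TYPE_TRAIT_2(__is_base_of, IsBaseOf, KEYCXX) -> case clang::BTT_IsBaseOf: return "__is_base_of";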
@@ -1763,20 +1756,6 @@
llvm_unreachable("Expression type trait not covered by switch");
}
-void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
- OS << getTypeTraitName(E->getTrait()) << '(';
- E->getQueriedType().print(OS, Policy);
- OS << ')';
-}
-
-void StmtPrinter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
- OS << getTypeTraitName(E->getTrait()) << '(';
- E->getLhsType().print(OS, Policy);
- OS << ',';
- E->getRhsType().print(OS, Policy);
- OS << ')';
-}
-
void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
OS << getTypeTraitName(E->getTrait()) << "(";
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
@@ -1881,7 +1860,9 @@
}
void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
- OS << "@selector(" << Node->getSelector().getAsString() << ')';
+ OS << "@selector(";
+ Node->getSelector().print(OS);
+ OS << ')';
}
void StmtPrinter::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index 6805e62..07ed86a 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -79,9 +79,8 @@
void StmtProfiler::VisitDeclStmt(const DeclStmt *S) {
VisitStmt(S);
- for (DeclStmt::const_decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
- D != DEnd; ++D)
- VisitDecl(*D);
+ for (const auto *D : S->decls())
+ VisitDecl(D);
}
void StmtProfiler::VisitNullStmt(const NullStmt *S) {
@@ -265,14 +264,27 @@
#include "clang/Basic/OpenMPKinds.def"
};
+void OMPClauseProfiler::VisitOMPIfClause(const OMPIfClause *C) {
+ if (C->getCondition())
+ Profiler->VisitStmt(C->getCondition());
+}
+
+void OMPClauseProfiler::VisitOMPNumThreadsClause(const OMPNumThreadsClause *C) {
+ if (C->getNumThreads())
+ Profiler->VisitStmt(C->getNumThreads());
+}
+
+void OMPClauseProfiler::VisitOMPSafelenClause(const OMPSafelenClause *C) {
+ if (C->getSafelen())
+ Profiler->VisitStmt(C->getSafelen());
+}
+
void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { }
template<typename T>
void OMPClauseProfiler::VisitOMPClauseList(T *Node) {
- for (typename T::varlist_const_iterator I = Node->varlist_begin(),
- E = Node->varlist_end();
- I != E; ++I)
- Profiler->VisitStmt(*I);
+ for (auto *I : Node->varlists())
+ Profiler->VisitStmt(I);
}
void OMPClauseProfiler::VisitOMPPrivateClause(const OMPPrivateClause *C) {
@@ -285,10 +297,13 @@
void OMPClauseProfiler::VisitOMPSharedClause(const OMPSharedClause *C) {
VisitOMPClauseList(C);
}
+void OMPClauseProfiler::VisitOMPCopyinClause(const OMPCopyinClause *C) {
+ VisitOMPClauseList(C);
+}
}
void
-StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) {
+StmtProfiler::VisitOMPExecutableDirective(const OMPExecutableDirective *S) {
VisitStmt(S);
OMPClauseProfiler P(this);
ArrayRef<OMPClause *> Clauses = S->clauses();
@@ -298,6 +313,14 @@
P.Visit(*I);
}
+void StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPSimdDirective(const OMPSimdDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}
@@ -948,19 +971,6 @@
VisitOverloadExpr(S);
}
-void StmtProfiler::VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *S) {
- VisitExpr(S);
- ID.AddInteger(S->getTrait());
- VisitType(S->getQueriedType());
-}
-
-void StmtProfiler::VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *S) {
- VisitExpr(S);
- ID.AddInteger(S->getTrait());
- VisitType(S->getLhsType());
- VisitType(S->getRhsType());
-}
-
void StmtProfiler::VisitTypeTraitExpr(const TypeTraitExpr *S) {
VisitExpr(S);
ID.AddInteger(S->getTrait());
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index 16efb79..52f95bf 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -345,7 +345,7 @@
raw_ostream &Out) const {
switch (getKind()) {
case Null:
- Out << "<no value>";
+ Out << "(no value)";
break;
case Type: {
@@ -362,7 +362,7 @@
// FIXME: distinguish between pointer and reference args?
ND->printQualifiedName(Out);
} else {
- Out << "<anonymous>";
+ Out << "(anonymous)";
}
break;
}
@@ -511,6 +511,8 @@
const ASTTemplateArgumentListInfo *
ASTTemplateArgumentListInfo::Create(ASTContext &C,
const TemplateArgumentListInfo &List) {
+ assert(llvm::alignOf<ASTTemplateArgumentListInfo>() >=
+ llvm::alignOf<TemplateArgumentLoc>());
std::size_t size = ASTTemplateArgumentListInfo::sizeFor(List.size());
void *Mem = C.Allocate(size, llvm::alignOf<ASTTemplateArgumentListInfo>());
ASTTemplateArgumentListInfo *TAI = new (Mem) ASTTemplateArgumentListInfo();
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 7421bae..dc3fcc8 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -422,6 +422,10 @@
return BPT->getPointeeType();
if (const ReferenceType *RT = getAs<ReferenceType>())
return RT->getPointeeType();
+ if (const MemberPointerType *MPT = getAs<MemberPointerType>())
+ return MPT->getPointeeType();
+ if (const DecayedType *DT = getAs<DecayedType>())
+ return DT->getPointeeType();
return QualType();
}
@@ -585,7 +589,7 @@
return Visit(T->getElementType());
}
AutoType *VisitFunctionType(const FunctionType *T) {
- return Visit(T->getResultType());
+ return Visit(T->getReturnType());
}
AutoType *VisitParenType(const ParenType *T) {
return Visit(T->getInnerType());
@@ -593,6 +597,9 @@
AutoType *VisitAttributedType(const AttributedType *T) {
return Visit(T->getModifiedType());
}
+ AutoType *VisitAdjustedType(const AdjustedType *T) {
+ return Visit(T->getOriginalType());
+ }
};
}
@@ -1516,7 +1523,7 @@
case ULong: return "unsigned long";
case ULongLong: return "unsigned long long";
case UInt128: return "unsigned __int128";
- case Half: return "half";
+ case Half: return Policy.Half ? "half" : "__fp16";
case Float: return "float";
case Double: return "double";
case LongDouble: return "long double";
@@ -1582,41 +1589,37 @@
llvm_unreachable("Invalid calling convention.");
}
-FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> args,
+FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
QualType canonical,
const ExtProtoInfo &epi)
- : FunctionType(FunctionProto, result, epi.TypeQuals,
- canonical,
- result->isDependentType(),
- result->isInstantiationDependentType(),
- result->isVariablyModifiedType(),
- result->containsUnexpandedParameterPack(),
- epi.ExtInfo),
- NumArgs(args.size()), NumExceptions(epi.NumExceptions),
- ExceptionSpecType(epi.ExceptionSpecType),
- HasAnyConsumedArgs(epi.ConsumedArguments != 0),
- Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn),
- RefQualifier(epi.RefQualifier)
-{
- assert(NumArgs == args.size() && "function has too many parameters");
+ : FunctionType(FunctionProto, result, epi.TypeQuals, canonical,
+ result->isDependentType(),
+ result->isInstantiationDependentType(),
+ result->isVariablyModifiedType(),
+ result->containsUnexpandedParameterPack(), epi.ExtInfo),
+ NumParams(params.size()), NumExceptions(epi.NumExceptions),
+ ExceptionSpecType(epi.ExceptionSpecType),
+ HasAnyConsumedParams(epi.ConsumedParameters != 0), Variadic(epi.Variadic),
+ HasTrailingReturn(epi.HasTrailingReturn), RefQualifier(epi.RefQualifier) {
+ assert(NumParams == params.size() && "function has too many parameters");
// Fill in the trailing argument array.
QualType *argSlot = reinterpret_cast<QualType*>(this+1);
- for (unsigned i = 0; i != NumArgs; ++i) {
- if (args[i]->isDependentType())
+ for (unsigned i = 0; i != NumParams; ++i) {
+ if (params[i]->isDependentType())
setDependent();
- else if (args[i]->isInstantiationDependentType())
+ else if (params[i]->isInstantiationDependentType())
setInstantiationDependent();
-
- if (args[i]->containsUnexpandedParameterPack())
+
+ if (params[i]->containsUnexpandedParameterPack())
setContainsUnexpandedParameterPack();
- argSlot[i] = args[i];
+ argSlot[i] = params[i];
}
if (getExceptionSpecType() == EST_Dynamic) {
// Fill in the exception array.
- QualType *exnSlot = argSlot + NumArgs;
+ QualType *exnSlot = argSlot + NumParams;
for (unsigned i = 0, e = epi.NumExceptions; i != e; ++i) {
if (epi.Exceptions[i]->isDependentType())
setDependent();
@@ -1630,7 +1633,7 @@
}
} else if (getExceptionSpecType() == EST_ComputedNoexcept) {
// Store the noexcept expression and context.
- Expr **noexSlot = reinterpret_cast<Expr**>(argSlot + NumArgs);
+ Expr **noexSlot = reinterpret_cast<Expr **>(argSlot + NumParams);
*noexSlot = epi.NoexceptExpr;
if (epi.NoexceptExpr) {
@@ -1643,7 +1646,8 @@
} else if (getExceptionSpecType() == EST_Uninstantiated) {
// Store the function decl from which we will resolve our
// exception specification.
- FunctionDecl **slot = reinterpret_cast<FunctionDecl**>(argSlot + NumArgs);
+ FunctionDecl **slot =
+ reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
slot[0] = epi.ExceptionSpecDecl;
slot[1] = epi.ExceptionSpecTemplate;
// This exception specification doesn't make the type dependent, because
@@ -1651,14 +1655,15 @@
} else if (getExceptionSpecType() == EST_Unevaluated) {
// Store the function decl from which we will resolve our
// exception specification.
- FunctionDecl **slot = reinterpret_cast<FunctionDecl**>(argSlot + NumArgs);
+ FunctionDecl **slot =
+ reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
slot[0] = epi.ExceptionSpecDecl;
}
- if (epi.ConsumedArguments) {
- bool *consumedArgs = const_cast<bool*>(getConsumedArgsBuffer());
- for (unsigned i = 0; i != NumArgs; ++i)
- consumedArgs[i] = epi.ConsumedArguments[i];
+ if (epi.ConsumedParameters) {
+ bool *consumedParams = const_cast<bool *>(getConsumedParamsBuffer());
+ for (unsigned i = 0; i != NumParams; ++i)
+ consumedParams[i] = epi.ConsumedParameters[i];
}
}
@@ -1686,16 +1691,41 @@
return value.getBoolValue() ? NR_Nothrow : NR_Throw;
}
+bool FunctionProtoType::isNothrow(const ASTContext &Ctx,
+ bool ResultIfDependent) const {
+ ExceptionSpecificationType EST = getExceptionSpecType();
+ assert(EST != EST_Unevaluated && EST != EST_Uninstantiated);
+ if (EST == EST_DynamicNone || EST == EST_BasicNoexcept)
+ return true;
+
+ if (EST == EST_Dynamic && ResultIfDependent == true) {
+ // A dynamic exception specification is throwing unless every exception
+ // type is an (unexpanded) pack expansion type.
+ for (unsigned I = 0, N = NumExceptions; I != N; ++I)
+ if (!getExceptionType(I)->getAs<PackExpansionType>())
+ return false;
+ return ResultIfDependent;
+ }
+
+ if (EST != EST_ComputedNoexcept)
+ return false;
+
+ NoexceptResult NR = getNoexceptSpec(Ctx);
+ if (NR == NR_Dependent)
+ return ResultIfDependent;
+ return NR == NR_Nothrow;
+}
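As a rough illustration of the ResultIfDependent contract above, consider these hypothetical declarations (names invented for the sketch):

    template <typename... Ts> void f() throw(Ts...);             // EST_Dynamic, only a pack expansion
    template <typename T>     void g() throw(T);                  // EST_Dynamic, a concrete type
    template <typename T>     void h() noexcept(sizeof(T) == 4);  // EST_ComputedNoexcept, value-dependent

With ResultIfDependent == true, f's prototype counts as non-throwing because every listed exception type is an unexpanded pack expansion; g's prototype is throwing; and h's answer falls back to ResultIfDependent until the noexcept expression can actually be evaluated.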
+
bool FunctionProtoType::isTemplateVariadic() const {
- for (unsigned ArgIdx = getNumArgs(); ArgIdx; --ArgIdx)
- if (isa<PackExpansionType>(getArgType(ArgIdx - 1)))
+ for (unsigned ArgIdx = getNumParams(); ArgIdx; --ArgIdx)
+ if (isa<PackExpansionType>(getParamType(ArgIdx - 1)))
return true;
return false;
}
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
- const QualType *ArgTys, unsigned NumArgs,
+ const QualType *ArgTys, unsigned NumParams,
const ExtProtoInfo &epi,
const ASTContext &Context) {
@@ -1717,7 +1747,7 @@
// whether the following bool is the EH spec or part of the arguments.
ID.AddPointer(Result.getAsOpaquePtr());
- for (unsigned i = 0; i != NumArgs; ++i)
+ for (unsigned i = 0; i != NumParams; ++i)
ID.AddPointer(ArgTys[i].getAsOpaquePtr());
// This method is relatively performance sensitive, so as a performance
// shortcut, use one AddInteger call instead of four for the next four
@@ -1740,9 +1770,9 @@
epi.ExceptionSpecType == EST_Unevaluated) {
ID.AddPointer(epi.ExceptionSpecDecl->getCanonicalDecl());
}
- if (epi.ConsumedArguments) {
- for (unsigned i = 0; i != NumArgs; ++i)
- ID.AddBoolean(epi.ConsumedArguments[i]);
+ if (epi.ConsumedParameters) {
+ for (unsigned i = 0; i != NumParams; ++i)
+ ID.AddBoolean(epi.ConsumedParameters[i]);
}
epi.ExtInfo.Profile(ID);
ID.AddBoolean(epi.HasTrailingReturn);
@@ -1750,7 +1780,7 @@
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Ctx) {
- Profile(ID, getResultType(), arg_type_begin(), NumArgs, getExtProtoInfo(),
+ Profile(ID, getReturnType(), param_type_begin(), NumParams, getExtProtoInfo(),
Ctx);
}
@@ -1819,11 +1849,9 @@
decl(const_cast<TagDecl*>(D)) {}
static TagDecl *getInterestingTagDecl(TagDecl *decl) {
- for (TagDecl::redecl_iterator I = decl->redecls_begin(),
- E = decl->redecls_end();
- I != E; ++I) {
+ for (auto I : decl->redecls()) {
if (I->isCompleteDefinition() || I->isBeingDefined())
- return *I;
+ return I;
}
// If there's no definition (not even in progress), return what we have.
return decl;
@@ -2200,13 +2228,12 @@
case Type::ExtVector:
return Cache::get(cast<VectorType>(T)->getElementType());
case Type::FunctionNoProto:
- return Cache::get(cast<FunctionType>(T)->getResultType());
+ return Cache::get(cast<FunctionType>(T)->getReturnType());
case Type::FunctionProto: {
const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
- CachedProperties result = Cache::get(FPT->getResultType());
- for (FunctionProtoType::arg_type_iterator ai = FPT->arg_type_begin(),
- ae = FPT->arg_type_end(); ai != ae; ++ai)
- result = merge(result, Cache::get(*ai));
+ CachedProperties result = Cache::get(FPT->getReturnType());
+ for (const auto &ai : FPT->param_types())
+ result = merge(result, Cache::get(ai));
return result;
}
case Type::ObjCInterface: {
@@ -2285,13 +2312,12 @@
case Type::ExtVector:
return computeLinkageInfo(cast<VectorType>(T)->getElementType());
case Type::FunctionNoProto:
- return computeLinkageInfo(cast<FunctionType>(T)->getResultType());
+ return computeLinkageInfo(cast<FunctionType>(T)->getReturnType());
case Type::FunctionProto: {
const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
- LinkageInfo LV = computeLinkageInfo(FPT->getResultType());
- for (FunctionProtoType::arg_type_iterator ai = FPT->arg_type_begin(),
- ae = FPT->arg_type_end(); ai != ae; ++ai)
- LV.merge(computeLinkageInfo(*ai));
+ LinkageInfo LV = computeLinkageInfo(FPT->getReturnType());
+ for (const auto &ai : FPT->param_types())
+ LV.merge(computeLinkageInfo(ai));
return LV;
}
case Type::ObjCInterface:
@@ -2441,3 +2467,7 @@
return DK_none;
}
+
+CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const {
+ return getClass()->getAsCXXRecordDecl()->getMostRecentDecl();
+}
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 571e3db..89ec8d6 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -204,6 +204,7 @@
NeedARCStrongQualifier = true;
// Fall through
+ case Type::Adjusted:
case Type::Decayed:
case Type::Pointer:
case Type::BlockPointer:
@@ -471,12 +472,21 @@
printAfter(T->getElementType(), OS);
}
+void TypePrinter::printAdjustedBefore(const AdjustedType *T, raw_ostream &OS) {
+ // Print the adjusted representation, otherwise the adjustment will be
+ // invisible.
+ printBefore(T->getAdjustedType(), OS);
+}
+void TypePrinter::printAdjustedAfter(const AdjustedType *T, raw_ostream &OS) {
+ printAfter(T->getAdjustedType(), OS);
+}
+
void TypePrinter::printDecayedBefore(const DecayedType *T, raw_ostream &OS) {
// Print as though it's a pointer.
- printBefore(T->getDecayedType(), OS);
+ printAdjustedBefore(T, OS);
}
void TypePrinter::printDecayedAfter(const DecayedType *T, raw_ostream &OS) {
- printAfter(T->getDecayedType(), OS);
+ printAdjustedAfter(T, OS);
}
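The effect of printing the adjusted form is easiest to see with array-to-pointer decay; a minimal sketch with a hypothetical declaration:

    void take(int arr[4]);   // the parameter's DecayedType is printed as 'int *',
                             // i.e. the adjusted pointer form, not 'int [4]'

Since DecayedType is modelled as a kind of AdjustedType here, its printers can simply delegate to the new printAdjustedBefore/After.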
void TypePrinter::printDependentSizedArrayBefore(
@@ -598,7 +608,7 @@
} else {
// If needed for precedence reasons, wrap the inner part in grouping parens.
SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
- printBefore(T->getResultType(), OS);
+ printBefore(T->getReturnType(), OS);
if (!PrevPHIsEmpty.get())
OS << '(';
}
@@ -614,17 +624,17 @@
OS << '(';
{
ParamPolicyRAII ParamPolicy(Policy);
- for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
+ for (unsigned i = 0, e = T->getNumParams(); i != e; ++i) {
if (i) OS << ", ";
- print(T->getArgType(i), OS, StringRef());
+ print(T->getParamType(i), OS, StringRef());
}
}
if (T->isVariadic()) {
- if (T->getNumArgs())
+ if (T->getNumParams())
OS << ", ";
OS << "...";
- } else if (T->getNumArgs() == 0 && !Policy.LangOpts.CPlusPlus) {
+ } else if (T->getNumParams() == 0 && !Policy.LangOpts.CPlusPlus) {
// Do not emit int() if we have a proto, emit 'int(void)'.
OS << "void";
}
@@ -703,17 +713,17 @@
T->printExceptionSpecification(OS, Policy);
if (T->hasTrailingReturn()) {
- OS << " -> ";
- print(T->getResultType(), OS, StringRef());
+ OS << " -> ";
+ print(T->getReturnType(), OS, StringRef());
} else
- printAfter(T->getResultType(), OS);
+ printAfter(T->getReturnType(), OS);
}
void TypePrinter::printFunctionNoProtoBefore(const FunctionNoProtoType *T,
raw_ostream &OS) {
// If needed for precedence reasons, wrap the inner part in grouping parens.
SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
- printBefore(T->getResultType(), OS);
+ printBefore(T->getReturnType(), OS);
if (!PrevPHIsEmpty.get())
OS << '(';
}
@@ -727,7 +737,7 @@
OS << "()";
if (T->getNoReturnAttr())
OS << " __attribute__((noreturn))";
- printAfter(T->getResultType(), OS);
+ printAfter(T->getReturnType(), OS);
}
void TypePrinter::printTypeSpec(const NamedDecl *D, raw_ostream &OS) {
@@ -838,7 +848,7 @@
if (NS->getIdentifier())
OS << NS->getName() << "::";
else
- OS << "<anonymous>::";
+ OS << "(anonymous namespace)::";
} else if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
IncludeStrongLifetimeRAII Strong(Policy);
@@ -890,13 +900,13 @@
OS << Typedef->getIdentifier()->getName();
} else {
// Make an unambiguous representation for anonymous types, e.g.
- // <anonymous enum at /usr/include/string.h:120:9>
+ // (anonymous enum at /usr/include/string.h:120:9)
if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
- OS << "<lambda";
+ OS << "(lambda";
HasKindDecoration = true;
} else {
- OS << "<anonymous";
+ OS << "(anonymous";
}
if (Policy.AnonymousTagLocations) {
@@ -915,7 +925,7 @@
}
}
- OS << '>';
+ OS << ')';
}
// If this is a class template specialization, print the template
@@ -1249,13 +1259,12 @@
print(T->getBaseType(), OS, StringRef());
OS << '<';
bool isFirst = true;
- for (ObjCObjectType::qual_iterator
- I = T->qual_begin(), E = T->qual_end(); I != E; ++I) {
+ for (const auto *I : T->quals()) {
if (isFirst)
isFirst = false;
else
OS << ',';
- OS << (*I)->getName();
+ OS << I->getName();
}
OS << '>';
spaceBeforePlaceHolder(OS);
@@ -1271,12 +1280,12 @@
T->getPointeeType().getLocalQualifiers().print(OS, Policy,
/*appendSpaceIfNonEmpty=*/true);
+ assert(!T->isObjCSelType());
+
if (T->isObjCIdType() || T->isObjCQualifiedIdType())
OS << "id";
else if (T->isObjCClassType() || T->isObjCQualifiedClassType())
OS << "Class";
- else if (T->isObjCSelType())
- OS << "SEL";
else
OS << T->getInterfaceDecl()->getName();
@@ -1292,7 +1301,8 @@
OS << '>';
}
- if (!T->isObjCIdType() && !T->isObjCQualifiedIdType()) {
+ if (!T->isObjCIdType() && !T->isObjCQualifiedIdType() &&
+ !T->isObjCClassType() && !T->isObjCQualifiedClassType()) {
OS << " *"; // Don't forget the implicit pointer.
} else {
spaceBeforePlaceHolder(OS);
@@ -1413,13 +1423,10 @@
print(llvm::errs(), PrintingPolicy(LO), "identifier");
llvm::errs() << '\n';
}
-void QualType::dump() const {
- dump(0);
-}
-void Type::dump() const {
- QualType(this, 0).dump();
-}
+LLVM_DUMP_METHOD void QualType::dump() const { dump(0); }
+
+LLVM_DUMP_METHOD void Type::dump() const { QualType(this, 0).dump(); }
std::string Qualifiers::getAsString() const {
LangOptions LO;
diff --git a/lib/AST/VTTBuilder.cpp b/lib/AST/VTTBuilder.cpp
index 5ca4e86..c213d1c 100644
--- a/lib/AST/VTTBuilder.cpp
+++ b/lib/AST/VTTBuilder.cpp
@@ -56,15 +56,13 @@
void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
const CXXRecordDecl *RD = Base.getBase();
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
-
+ for (const auto &I : RD->bases()) {
// Don't layout virtual bases.
- if (I->isVirtual())
+ if (I.isVirtual())
continue;
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
CharUnits BaseOffset = Base.getBaseOffset() +
@@ -88,10 +86,9 @@
if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
return;
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
+ for (const auto &I : RD->bases()) {
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
// Itanium C++ ABI 2.6.2:
// Secondary virtual pointers are present for all bases with either
@@ -106,7 +103,7 @@
bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
bool BaseDeclIsNonVirtualPrimaryBase = false;
CharUnits BaseOffset;
- if (I->isVirtual()) {
+ if (I.isVirtual()) {
// Ignore virtual bases that we've already visited.
if (!VBases.insert(BaseDecl))
continue;
@@ -153,13 +150,12 @@
void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
VisitedVirtualBasesSetTy &VBases) {
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
+ for (const auto &I : RD->bases()) {
const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
// Check if this is a virtual base.
- if (I->isVirtual()) {
+ if (I.isVirtual()) {
// Check if we've seen this base before.
if (!VBases.insert(BaseDecl))
continue;
diff --git a/lib/AST/VTableBuilder.cpp b/lib/AST/VTableBuilder.cpp
index 5f7ae0f..c6b38d3 100644
--- a/lib/AST/VTableBuilder.cpp
+++ b/lib/AST/VTableBuilder.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -268,11 +269,11 @@
const FunctionType *DerivedFT = DerivedMD->getType()->getAs<FunctionType>();
// Canonicalize the return types.
- CanQualType CanDerivedReturnType =
- Context.getCanonicalType(DerivedFT->getResultType());
- CanQualType CanBaseReturnType =
- Context.getCanonicalType(BaseFT->getResultType());
-
+ CanQualType CanDerivedReturnType =
+ Context.getCanonicalType(DerivedFT->getReturnType());
+ CanQualType CanBaseReturnType =
+ Context.getCanonicalType(BaseFT->getReturnType());
+
assert(CanDerivedReturnType->getTypeClass() ==
CanBaseReturnType->getTypeClass() &&
"Types must have same type class!");
@@ -337,13 +338,12 @@
OffsetInLayoutClass;
// Traverse our bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
CharUnits BaseOffset;
CharUnits BaseOffsetInLayoutClass;
- if (I->isVirtual()) {
+ if (B.isVirtual()) {
// Check if we've visited this virtual base before.
if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
continue;
@@ -363,7 +363,7 @@
}
ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset),
- I->isVirtual(), BaseOffsetInLayoutClass,
+ B.isVirtual(), BaseOffsetInLayoutClass,
SubobjectOffsets, SubobjectLayoutClassOffsets,
SubobjectCounts);
}
@@ -374,16 +374,15 @@
const CXXRecordDecl *RD = Base.getBase();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
// Ignore bases that don't have any virtual member functions.
if (!BaseDecl->isPolymorphic())
continue;
CharUnits BaseOffset;
- if (I->isVirtual()) {
+ if (B.isVirtual()) {
if (!VisitedVirtualBases.insert(BaseDecl)) {
// We've visited this base before.
continue;
@@ -397,21 +396,22 @@
dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
}
- Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
+ Out << "Final overriders for (";
+ RD->printQualifiedName(Out);
+ Out << ", ";
Out << Base.getBaseOffset().getQuantity() << ")\n";
// Now dump the overriders for this base subobject.
- for (CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
-
+ for (const auto *MD : RD->methods()) {
if (!MD->isVirtual())
continue;
OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());
- Out << " " << MD->getQualifiedNameAsString() << " - (";
- Out << Overrider.Method->getQualifiedNameAsString();
+ Out << " ";
+ MD->printQualifiedName(Out);
+ Out << " - (";
+ Overrider.Method->printQualifiedName(Out);
Out << ", " << Overrider.Offset.getQuantity() << ')';
BaseOffset Offset;
@@ -420,8 +420,10 @@
if (!Offset.isEmpty()) {
Out << " [ret-adj: ";
- if (Offset.VirtualBase)
- Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
+ if (Offset.VirtualBase) {
+ Offset.VirtualBase->printQualifiedName(Out);
+ Out << " vbase, ";
+ }
Out << Offset.NonVirtualOffset.getQuantity() << " nv]";
}
@@ -472,10 +474,10 @@
// list here because there isn't necessarily an inheritance
// relationship between the two methods.
if (LT->getTypeQuals() != RT->getTypeQuals() ||
- LT->getNumArgs() != RT->getNumArgs())
+ LT->getNumParams() != RT->getNumParams())
return false;
- for (unsigned I = 0, E = LT->getNumArgs(); I != E; ++I)
- if (LT->getArgType(I) != RT->getArgType(I))
+ for (unsigned I = 0, E = LT->getNumParams(); I != E; ++I)
+ if (LT->getParamType(I) != RT->getParamType(I))
return false;
return true;
}
@@ -684,10 +686,7 @@
}
// Add the vcall offsets.
- for (CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
-
+ for (const auto *MD : RD->methods()) {
if (!MD->isVirtual())
continue;
@@ -715,13 +714,11 @@
}
// And iterate over all non-virtual bases (ignoring the primary base).
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
-
- if (I->isVirtual())
+ for (const auto &B : RD->bases()) {
+ if (B.isVirtual())
continue;
- const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
if (BaseDecl == PrimaryBase)
continue;
@@ -741,12 +738,11 @@
Context.getASTRecordLayout(LayoutClass);
// Add vbase offsets.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
// Check if this is a virtual base that we haven't visited before.
- if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
+ if (B.isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
CharUnits Offset =
LayoutClassLayout.getVBaseClassOffset(BaseDecl) - OffsetInLayoutClass;
@@ -1509,10 +1505,7 @@
NewVirtualFunctionsTy NewVirtualFunctions;
// Now go through all virtual member functions and add them.
- for (CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
-
+ for (const auto *MD : RD->methods()) {
if (!MD->isVirtual())
continue;
@@ -1759,13 +1752,12 @@
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
+ for (const auto &B : RD->bases()) {
// Ignore virtual bases, we'll emit them later.
- if (I->isVirtual())
+ if (B.isVirtual())
continue;
- const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
// Ignore bases that don't have a vtable.
if (!BaseDecl->isDynamicClass())
@@ -1838,13 +1830,12 @@
}
// Traverse bases, looking for more primary virtual bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
CharUnits BaseOffsetInLayoutClass;
- if (I->isVirtual()) {
+ if (B.isVirtual()) {
if (!VBases.insert(BaseDecl))
continue;
@@ -1868,13 +1859,12 @@
// Then come the virtual base virtual tables, also in inheritance graph
// order, and again excluding primary bases (which share virtual tables with
// the classes for which they are primary).
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
// Check if this base needs a vtable. (If it's virtual, not a primary base
// of some other class, and we haven't visited it before).
- if (I->isVirtual() && BaseDecl->isDynamicClass() &&
+ if (B.isVirtual() && BaseDecl->isDynamicClass() &&
!PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) {
const ASTRecordLayout &MostDerivedClassLayout =
Context.getASTRecordLayout(MostDerivedClass);
@@ -1900,21 +1890,6 @@
}
}
-struct ItaniumThunkInfoComparator {
- bool operator() (const ThunkInfo &LHS, const ThunkInfo &RHS) {
- assert(LHS.Method == 0);
- assert(RHS.Method == 0);
-
- if (LHS.This != RHS.This)
- return LHS.This < RHS.This;
-
- if (LHS.Return != RHS.Return)
- return LHS.Return < RHS.Return;
-
- return false;
- }
-};
-
/// dumpLayout - Dump the vtable layout.
void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
// FIXME: write more tests that actually use the dumpLayout output to prevent
@@ -1922,12 +1897,13 @@
if (isBuildingConstructorVTable()) {
Out << "Construction vtable for ('";
- Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
+ MostDerivedClass->printQualifiedName(Out);
+ Out << "', ";
Out << MostDerivedClassOffset.getQuantity() << ") in '";
- Out << LayoutClass->getQualifiedNameAsString();
+ LayoutClass->printQualifiedName(Out);
} else {
Out << "Vtable for '";
- Out << MostDerivedClass->getQualifiedNameAsString();
+ MostDerivedClass->printQualifiedName(Out);
}
Out << "' (" << Components.size() << " entries).\n";
@@ -1973,7 +1949,8 @@
break;
case VTableComponent::CK_RTTI:
- Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI";
+ Component.getRTTIDecl()->printQualifiedName(Out);
+ Out << " RTTI";
break;
case VTableComponent::CK_FunctionPointer: {
@@ -2028,7 +2005,7 @@
const CXXDestructorDecl *DD = Component.getDestructorDecl();
- Out << DD->getQualifiedNameAsString();
+ DD->printQualifiedName(Out);
if (IsComplete)
Out << "() [complete]";
else
@@ -2078,7 +2055,8 @@
const BaseSubobject &Base =
AddressPointsByIndex.find(NextIndex)->second;
- Out << " -- (" << Base.getBase()->getQualifiedNameAsString();
+ Out << " -- (";
+ Base.getBase()->printQualifiedName(Out);
Out << ", " << Base.getBaseOffset().getQuantity();
Out << ") vtable address --\n";
} else {
@@ -2124,7 +2102,8 @@
}
Out << "Virtual base offset offsets for '";
- Out << MostDerivedClass->getQualifiedNameAsString() << "' (";
+ MostDerivedClass->printQualifiedName(Out);
+ Out << "' (";
Out << ClassNamesAndOffsets.size();
Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
@@ -2158,7 +2137,10 @@
ThunkInfoVectorTy ThunksVector = Thunks[MD];
std::sort(ThunksVector.begin(), ThunksVector.end(),
- ItaniumThunkInfoComparator());
+ [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ assert(LHS.Method == 0 && RHS.Method == 0);
+ return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
+ });
Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
@@ -2203,10 +2185,7 @@
// Store them in a map keyed by the index so we'll get a sorted table.
std::map<uint64_t, std::string> IndicesMap;
- for (CXXRecordDecl::method_iterator i = MostDerivedClass->method_begin(),
- e = MostDerivedClass->method_end(); i != e; ++i) {
- const CXXMethodDecl *MD = *i;
-
+ for (const auto *MD : MostDerivedClass->methods()) {
// We only want virtual member functions.
if (!MD->isVirtual())
continue;
@@ -2230,7 +2209,7 @@
// Print the vtable indices for all the member functions.
if (!IndicesMap.empty()) {
Out << "VTable indices for '";
- Out << MostDerivedClass->getQualifiedNameAsString();
+ MostDerivedClass->printQualifiedName(Out);
Out << "' (" << IndicesMap.size() << " entries).\n";
for (std::map<uint64_t, std::string>::const_iterator I = IndicesMap.begin(),
@@ -2245,17 +2224,6 @@
Out << '\n';
}
-
-struct VTableThunksComparator {
- bool operator()(const VTableLayout::VTableThunkTy &LHS,
- const VTableLayout::VTableThunkTy &RHS) {
- if (LHS.first == RHS.first) {
- assert(LHS.second == RHS.second &&
- "Different thunks should have unique indices!");
- }
- return LHS.first < RHS.first;
- }
-};
}
VTableLayout::VTableLayout(uint64_t NumVTableComponents,
@@ -2276,14 +2244,18 @@
this->VTableThunks.get());
std::sort(this->VTableThunks.get(),
this->VTableThunks.get() + NumVTableThunks,
- VTableThunksComparator());
+ [](const VTableLayout::VTableThunkTy &LHS,
+ const VTableLayout::VTableThunkTy &RHS) {
+ assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
+ "Different thunks should have unique indices!");
+ return LHS.first < RHS.first;
+ });
}
VTableLayout::~VTableLayout() { }
ItaniumVTableContext::ItaniumVTableContext(ASTContext &Context)
- : IsMicrosoftABI(Context.getTargetInfo().getCXXABI().isMicrosoft()) {
-}
+ : VTableContextBase(/*MS=*/false) {}
ItaniumVTableContext::~ItaniumVTableContext() {
llvm::DeleteContainerSeconds(VTableLayouts);
@@ -2348,8 +2320,6 @@
void
ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) {
- assert(!IsMicrosoftABI && "Shouldn't be called in this ABI!");
-
const VTableLayout *&Entry = VTableLayouts[RD];
// Check if we've computed this information before.
@@ -2444,6 +2414,9 @@
typedef llvm::DenseMap<GlobalDecl, MethodVFTableLocation>
MethodVFTableLocationsTy;
+ typedef llvm::iterator_range<MethodVFTableLocationsTy::const_iterator>
+ method_locations_range;
+
private:
/// VTables - Global vtable information.
MicrosoftVTableContext &VTables;
@@ -2457,7 +2430,7 @@
const ASTRecordLayout &MostDerivedClassLayout;
- VFPtrInfo WhichVFPtr;
+ const VPtrInfo &WhichVFPtr;
/// FinalOverriders - The final overriders of the most derived class.
const FinalOverriders Overriders;
@@ -2521,11 +2494,8 @@
}
/// ComputeThisOffset - Returns the 'this' argument offset for the given
- /// method in the given subobject, relative to the beginning of the
- /// MostDerivedClass.
- CharUnits ComputeThisOffset(const CXXMethodDecl *MD,
- BaseSubobject Base,
- FinalOverriders::OverriderInfo Overrider);
+ /// method, relative to the beginning of the MostDerivedClass.
+ CharUnits ComputeThisOffset(FinalOverriders::OverriderInfo Overrider);
void CalculateVtordispAdjustment(FinalOverriders::OverriderInfo Overrider,
CharUnits ThisOffset, ThisAdjustment &TA);
@@ -2533,17 +2503,21 @@
/// AddMethod - Add a single virtual member function to the vftable
/// components vector.
void AddMethod(const CXXMethodDecl *MD, ThunkInfo TI) {
+ if (!TI.isEmpty()) {
+ VTableThunks[Components.size()] = TI;
+ AddThunk(MD, TI);
+ }
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
assert(TI.Return.isEmpty() &&
"Destructor can't have return adjustment!");
Components.push_back(VTableComponent::MakeDeletingDtor(DD));
} else {
- if (!TI.isEmpty())
- VTableThunks[Components.size()] = TI;
Components.push_back(VTableComponent::MakeFunction(MD));
}
}
+ bool NeedsReturnAdjustingThunk(const CXXMethodDecl *MD);
+
/// AddMethods - Add the methods of this base subobject and the relevant
/// subbases to the vftable we're currently laying out.
void AddMethods(BaseSubobject Base, unsigned BaseDepth,
@@ -2557,6 +2531,7 @@
BasesSetVectorTy VisitedBases;
AddMethods(BaseSubobject(MostDerivedClass, CharUnits::Zero()), 0, 0,
VisitedBases);
+ assert(Components.size() && "vftable can't be empty");
assert(MethodVFTableLocations.empty());
for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
@@ -2567,8 +2542,8 @@
// and the entries shadowed by return adjusting thunks.
if (MD->getParent() != MostDerivedClass || MI.Shadowed)
continue;
- MethodVFTableLocation Loc(MI.VBTableIndex, WhichVFPtr.LastVBase,
- WhichVFPtr.VFPtrOffset, MI.VFTableIndex);
+ MethodVFTableLocation Loc(MI.VBTableIndex, WhichVFPtr.getVBaseWithVPtr(),
+ WhichVFPtr.NonVirtualOffset, MI.VFTableIndex);
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
MethodVFTableLocations[GlobalDecl(DD, Dtor_Deleting)] = Loc;
} else {
@@ -2586,12 +2561,12 @@
public:
VFTableBuilder(MicrosoftVTableContext &VTables,
- const CXXRecordDecl *MostDerivedClass, VFPtrInfo Which)
+ const CXXRecordDecl *MostDerivedClass, const VPtrInfo *Which)
: VTables(VTables),
Context(MostDerivedClass->getASTContext()),
MostDerivedClass(MostDerivedClass),
MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)),
- WhichVFPtr(Which),
+ WhichVFPtr(*Which),
Overriders(MostDerivedClass, CharUnits(), MostDerivedClass) {
LayoutVFTable();
@@ -2605,12 +2580,9 @@
ThunksMapTy::const_iterator thunks_end() const { return Thunks.end(); }
- MethodVFTableLocationsTy::const_iterator vtable_indices_begin() const {
- return MethodVFTableLocations.begin();
- }
-
- MethodVFTableLocationsTy::const_iterator vtable_indices_end() const {
- return MethodVFTableLocations.end();
+ method_locations_range vtable_locations() const {
+ return method_locations_range(MethodVFTableLocations.begin(),
+ MethodVFTableLocations.end());
}
uint64_t getNumVTableComponents() const { return Components.size(); }
@@ -2634,6 +2606,8 @@
void dumpLayout(raw_ostream &);
};
+} // end namespace
+
/// InitialOverriddenDefinitionCollector - Finds the set of least derived bases
/// that define the given method.
struct InitialOverriddenDefinitionCollector {
@@ -2655,14 +2629,18 @@
}
CharUnits
-VFTableBuilder::ComputeThisOffset(const CXXMethodDecl *MD,
- BaseSubobject Base,
- FinalOverriders::OverriderInfo Overrider) {
+VFTableBuilder::ComputeThisOffset(FinalOverriders::OverriderInfo Overrider) {
InitialOverriddenDefinitionCollector Collector;
- visitAllOverriddenMethods(MD, Collector);
+ visitAllOverriddenMethods(Overrider.Method, Collector);
+
+ // If there are no overrides then 'this' is located
+ // in the base that defines the method.
+ if (Collector.Bases.size() == 0)
+ return Overrider.Offset;
CXXBasePaths Paths;
- Base.getBase()->lookupInBases(BaseInSet, &Collector.Bases, Paths);
+ Overrider.Method->getParent()->lookupInBases(BaseInSet, &Collector.Bases,
+ Paths);
// This will hold the smallest this offset among overridees of MD.
// This implies that an offset of a non-virtual base will dominate an offset
@@ -2674,7 +2652,7 @@
for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end();
I != E; ++I) {
const CXXBasePath &Path = (*I);
- CharUnits ThisOffset = Base.getBaseOffset();
+ CharUnits ThisOffset = Overrider.Offset;
CharUnits LastVBaseOffset;
// For each path from the overrider to the parents of the overridden methods,
@@ -2705,16 +2683,16 @@
}
}
- if (isa<CXXDestructorDecl>(MD)) {
+ if (isa<CXXDestructorDecl>(Overrider.Method)) {
if (LastVBaseOffset.isZero()) {
// If a "Base" class has at least one non-virtual base with a virtual
// destructor, the "Base" virtual destructor will take the address
// of the "Base" subobject as the "this" argument.
- return Base.getBaseOffset();
+ ThisOffset = Overrider.Offset;
} else {
// A virtual destructor of a virtual base takes the address of the
// virtual base subobject as the "this" argument.
- return LastVBaseOffset;
+ ThisOffset = LastVBaseOffset;
}
}
@@ -2734,7 +2712,7 @@
const ASTRecordLayout::VBaseOffsetsMapTy &VBaseMap =
MostDerivedClassLayout.getVBaseOffsetsMap();
const ASTRecordLayout::VBaseOffsetsMapTy::const_iterator &VBaseMapEntry =
- VBaseMap.find(WhichVFPtr.LastVBase);
+ VBaseMap.find(WhichVFPtr.getVBaseWithVPtr());
assert(VBaseMapEntry != VBaseMap.end());
// Check if we need a vtordisp adjustment at all.
@@ -2744,7 +2722,7 @@
CharUnits VFPtrVBaseOffset = VBaseMapEntry->second.VBaseOffset;
// The implicit vtordisp field is located right before the vbase.
TA.Virtual.Microsoft.VtordispOffset =
- (VFPtrVBaseOffset - WhichVFPtr.VFPtrFullOffset).getQuantity() - 4;
+ (VFPtrVBaseOffset - WhichVFPtr.FullOffsetInMDC).getQuantity() - 4;
// If the final overrider is defined in either:
// - the most derived class or its non-virtual base or
@@ -2756,13 +2734,13 @@
const CXXRecordDecl *OverriderVBase =
ComputeBaseOffset(Context, OverriderRD, MostDerivedClass).VirtualBase;
- if (!OverriderVBase || OverriderVBase == WhichVFPtr.LastVBase)
+ if (!OverriderVBase || OverriderVBase == WhichVFPtr.getVBaseWithVPtr())
return;
// Otherwise, we need to use the dynamic offset of the final overrider
// in order to get "this" adjustment right.
TA.Virtual.Microsoft.VBPtrOffset =
- (VFPtrVBaseOffset + WhichVFPtr.VFPtrOffset -
+ (VFPtrVBaseOffset + WhichVFPtr.NonVirtualOffset -
MostDerivedClassLayout.getVBPtrOffset()).getQuantity();
TA.Virtual.Microsoft.VBOffsetOffset =
Context.getTypeSizeInChars(Context.IntTy).getQuantity() *
@@ -2777,25 +2755,20 @@
// Put the virtual methods into VirtualMethods in the proper order:
// 1) Group overloads by declaration name. New groups are added to the
// vftable in the order of their first declarations in this class
- // (including overrides).
+ // (including overrides and non-virtual methods).
// 2) In each group, new overloads appear in the reverse order of declaration.
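  // For illustration (hypothetical class): if a class declares, in order,
  //   virtual void f(int);  virtual void g();  virtual void f(char);
  // then the 'f' group is created before the 'g' group, and within the 'f'
  // group the later overload comes first, so the resulting vftable order is
  // f(char), f(int), g().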
typedef SmallVector<const CXXMethodDecl *, 1> MethodGroup;
SmallVector<MethodGroup, 10> Groups;
typedef llvm::DenseMap<DeclarationName, unsigned> VisitedGroupIndicesTy;
VisitedGroupIndicesTy VisitedGroupIndices;
- for (CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
- if (!MD->isVirtual())
- continue;
-
+ for (const auto *MD : RD->methods()) {
VisitedGroupIndicesTy::iterator J;
bool Inserted;
- llvm::tie(J, Inserted) = VisitedGroupIndices.insert(
+ std::tie(J, Inserted) = VisitedGroupIndices.insert(
std::make_pair(MD->getDeclName(), Groups.size()));
if (Inserted)
- Groups.push_back(MethodGroup(1, MD));
- else
+ Groups.push_back(MethodGroup());
+ if (MD->isVirtual())
Groups[J->second].push_back(MD);
}
@@ -2803,6 +2776,32 @@
VirtualMethods.append(Groups[I].rbegin(), Groups[I].rend());
}
+/// We need a return adjusting thunk for this method if its return type is
+/// not trivially convertible to the return type of any of its overridden
+/// methods.
+bool VFTableBuilder::NeedsReturnAdjustingThunk(const CXXMethodDecl *MD) {
+ OverriddenMethodsSetTy OverriddenMethods;
+ ComputeAllOverriddenMethods(MD, OverriddenMethods);
+ for (OverriddenMethodsSetTy::iterator I = OverriddenMethods.begin(),
+ E = OverriddenMethods.end();
+ I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+ BaseOffset Adjustment =
+ ComputeReturnAdjustmentBaseOffset(Context, MD, OverriddenMD);
+ if (!Adjustment.isEmpty())
+ return true;
+ }
+ return false;
+}
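A minimal hypothetical hierarchy where this check fires (all names invented for illustration):

    struct A { virtual ~A(); };
    struct B { virtual B *clone(); };
    struct D : A, B { D *clone() override; };

D::clone() covariantly overrides B::clone(); converting the returned D* to B* needs a non-zero offset because B is not D's primary base, so ComputeReturnAdjustmentBaseOffset() yields a non-empty adjustment and the slot inherited from B gets a return-adjusting thunk.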
+
+static bool isDirectVBase(const CXXRecordDecl *Base, const CXXRecordDecl *RD) {
+ for (const auto &B : RD->bases()) {
+ if (B.isVirtual() && B.getType()->getAsCXXRecordDecl() == Base)
+ return true;
+ }
+ return false;
+}
+
void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
const CXXRecordDecl *LastVBase,
BasesSetVectorTy &VisitedBases) {
@@ -2816,9 +2815,9 @@
// the one defined by the vfptr base path or the primary base of the current class.
const CXXRecordDecl *NextBase = 0, *NextLastVBase = LastVBase;
CharUnits NextBaseOffset;
- if (BaseDepth < WhichVFPtr.PathToBaseWithVFPtr.size()) {
- NextBase = WhichVFPtr.PathToBaseWithVFPtr[BaseDepth];
- if (Layout.getVBaseOffsetsMap().count(NextBase)) {
+ if (BaseDepth < WhichVFPtr.PathToBaseWithVPtr.size()) {
+ NextBase = WhichVFPtr.PathToBaseWithVPtr[BaseDepth];
+ if (isDirectVBase(NextBase, RD)) {
NextLastVBase = NextBase;
NextBaseOffset = MostDerivedClassLayout.getVBaseClassOffset(NextBase);
} else {
@@ -2856,13 +2855,21 @@
FinalOverriders::OverriderInfo Overrider =
Overriders.getOverrider(MD, Base.getBaseOffset());
- ThisAdjustment ThisAdjustmentOffset;
- bool ForceThunk = false;
+ const CXXMethodDecl *OverriderMD = Overrider.Method;
+ const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, VisitedBases);
- // Check if this virtual member function overrides
- // a method in one of the visited bases.
- if (const CXXMethodDecl *OverriddenMD =
- FindNearestOverriddenMethod(MD, VisitedBases)) {
+ ThisAdjustment ThisAdjustmentOffset;
+ bool ReturnAdjustingThunk = false;
+ CharUnits ThisOffset = ComputeThisOffset(Overrider);
+ ThisAdjustmentOffset.NonVirtual =
+ (ThisOffset - WhichVFPtr.FullOffsetInMDC).getQuantity();
+ if ((OverriddenMD || OverriderMD != MD) &&
+ WhichVFPtr.getVBaseWithVPtr())
+ CalculateVtordispAdjustment(Overrider, ThisOffset, ThisAdjustmentOffset);
+
+ if (OverriddenMD) {
+ // If MD overrides anything in this vftable, we need to update the entries.
MethodInfoMapTy::iterator OverriddenMDIterator =
MethodInfoMap.find(OverriddenMD);
@@ -2872,23 +2879,7 @@
MethodInfo &OverriddenMethodInfo = OverriddenMDIterator->second;
- // Create a this-adjusting thunk if needed.
- CharUnits TI = ComputeThisOffset(MD, Base, Overrider);
- if (TI != WhichVFPtr.VFPtrFullOffset) {
- ThisAdjustmentOffset.NonVirtual =
- (TI - WhichVFPtr.VFPtrFullOffset).getQuantity();
- }
-
- if (WhichVFPtr.LastVBase)
- CalculateVtordispAdjustment(Overrider, TI, ThisAdjustmentOffset);
-
- if (!ThisAdjustmentOffset.isEmpty()) {
- VTableThunks[OverriddenMethodInfo.VFTableIndex].This =
- ThisAdjustmentOffset;
- AddThunk(MD, VTableThunks[OverriddenMethodInfo.VFTableIndex]);
- }
-
- if (MD->getResultType() == OverriddenMD->getResultType()) {
+ if (!NeedsReturnAdjustingThunk(MD)) {
// No return adjustment needed - just replace the overridden method info
// with the current info.
MethodInfo MI(OverriddenMethodInfo.VBTableIndex,
@@ -2899,33 +2890,17 @@
"Should not have method info for this method yet!");
MethodInfoMap.insert(std::make_pair(MD, MI));
continue;
- } else {
- // In case we need a return adjustment, we'll add a new slot for
- // the overrider and put a return-adjusting thunk where the overridden
- // method was in the vftable.
- // For now, just mark the overriden method as shadowed by a new slot.
- OverriddenMethodInfo.Shadowed = true;
- ForceThunk = true;
-
- // Also apply this adjustment to the shadowed slots.
- if (!ThisAdjustmentOffset.isEmpty()) {
- // FIXME: this is O(N^2), can be O(N).
- const CXXMethodDecl *SubOverride = OverriddenMD;
- while ((SubOverride =
- FindNearestOverriddenMethod(SubOverride, VisitedBases))) {
- MethodInfoMapTy::iterator SubOverrideIterator =
- MethodInfoMap.find(SubOverride);
- if (SubOverrideIterator == MethodInfoMap.end())
- break;
- MethodInfo &SubOverrideMI = SubOverrideIterator->second;
- assert(SubOverrideMI.Shadowed);
- VTableThunks[SubOverrideMI.VFTableIndex].This =
- ThisAdjustmentOffset;
- AddThunk(MD, VTableThunks[SubOverrideMI.VFTableIndex]);
- }
- }
}
- } else if (Base.getBaseOffset() != WhichVFPtr.VFPtrFullOffset ||
+
+ // In case we need a return adjustment, we'll add a new slot for
+ // the overrider. Mark the overridden method as shadowed by the new slot.
+ OverriddenMethodInfo.Shadowed = true;
+
+ // Force a special name mangling for a return-adjusting thunk
+ // unless the method is the final overrider without this adjustment.
+ ReturnAdjustingThunk =
+ !(MD == OverriderMD && ThisAdjustmentOffset.isEmpty());
+ } else if (Base.getBaseOffset() != WhichVFPtr.FullOffsetInMDC ||
MD->size_overridden_methods()) {
// Skip methods that don't belong to the vftable of the current class,
// e.g. each method that wasn't seen in any of the visited sub-bases
@@ -2943,8 +2918,6 @@
"Should not have method info for this method yet!");
MethodInfoMap.insert(std::make_pair(MD, MI));
- const CXXMethodDecl *OverriderMD = Overrider.Method;
-
// Check if this overrider needs a return adjustment.
// We don't want to do this for pure virtual member functions.
BaseOffset ReturnAdjustmentOffset;
@@ -2954,7 +2927,7 @@
ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
}
if (!ReturnAdjustmentOffset.isEmpty()) {
- ForceThunk = true;
+ ReturnAdjustingThunk = true;
ReturnAdjustment.NonVirtual =
ReturnAdjustmentOffset.NonVirtualOffset.getQuantity();
if (ReturnAdjustmentOffset.VirtualBase) {
@@ -2969,40 +2942,30 @@
}
AddMethod(OverriderMD, ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment,
- ForceThunk ? MD : 0));
+ ReturnAdjustingThunk ? MD : 0));
}
}
-void PrintBasePath(const VFPtrInfo::BasePath &Path, raw_ostream &Out) {
- for (VFPtrInfo::BasePath::const_reverse_iterator I = Path.rbegin(),
+static void PrintBasePath(const VPtrInfo::BasePath &Path, raw_ostream &Out) {
+ for (VPtrInfo::BasePath::const_reverse_iterator I = Path.rbegin(),
E = Path.rend(); I != E; ++I) {
- Out << "'" << (*I)->getQualifiedNameAsString() << "' in ";
+ Out << "'";
+ (*I)->printQualifiedName(Out);
+ Out << "' in ";
}
}
-struct MicrosoftThunkInfoStableSortComparator {
- bool operator() (const ThunkInfo &LHS, const ThunkInfo &RHS) {
- if (LHS.This != RHS.This)
- return LHS.This < RHS.This;
-
- if (LHS.Return != RHS.Return)
- return LHS.Return < RHS.Return;
-
- // Keep different thunks with the same adjustments in the order they
- // were put into the vector.
- return false;
- }
-};
-
static void dumpMicrosoftThunkAdjustment(const ThunkInfo &TI, raw_ostream &Out,
bool ContinueFirstLine) {
const ReturnAdjustment &R = TI.Return;
bool Multiline = false;
- const char *LinePrefix = "\n ";
- if (!R.isEmpty()) {
+ const char *LinePrefix = "\n ";
+ if (!R.isEmpty() || TI.Method) {
if (!ContinueFirstLine)
Out << LinePrefix;
- Out << "[return adjustment: ";
+ Out << "[return adjustment (to type '"
+ << TI.Method->getReturnType().getCanonicalType().getAsString()
+ << "'): ";
if (R.Virtual.Microsoft.VBPtrOffset)
Out << "vbptr at offset " << R.Virtual.Microsoft.VBPtrOffset << ", ";
if (R.Virtual.Microsoft.VBIndex)
@@ -3021,7 +2984,7 @@
Out << "vtordisp at " << T.Virtual.Microsoft.VtordispOffset << ", ";
if (T.Virtual.Microsoft.VBPtrOffset) {
Out << "vbptr at " << T.Virtual.Microsoft.VBPtrOffset
- << " to the left, ";
+ << " to the left,";
assert(T.Virtual.Microsoft.VBOffsetOffset > 0);
Out << LinePrefix << " vboffset at "
<< T.Virtual.Microsoft.VBOffsetOffset << " in the vbtable, ";
@@ -3033,9 +2996,11 @@
void VFTableBuilder::dumpLayout(raw_ostream &Out) {
Out << "VFTable for ";
- PrintBasePath(WhichVFPtr.PathToBaseWithVFPtr, Out);
- Out << "'" << MostDerivedClass->getQualifiedNameAsString();
- Out << "' (" << Components.size() << " entries).\n";
+ PrintBasePath(WhichVFPtr.PathToBaseWithVPtr, Out);
+ Out << "'";
+ MostDerivedClass->printQualifiedName(Out);
+ Out << "' (" << Components.size()
+ << (Components.size() == 1 ? " entry" : " entries") << ").\n";
for (unsigned I = 0, E = Components.size(); I != E; ++I) {
Out << llvm::format("%4d | ", I);
@@ -3045,12 +3010,15 @@
// Dump the component.
switch (Component.getKind()) {
case VTableComponent::CK_RTTI:
- Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI";
+ Component.getRTTIDecl()->printQualifiedName(Out);
+ Out << " RTTI";
break;
case VTableComponent::CK_FunctionPointer: {
const CXXMethodDecl *MD = Component.getFunctionDecl();
+ // FIXME: Figure out how to print the real thunk type, since they can
+ // differ in the return type.
std::string Str = PredefinedExpr::ComputeName(
PredefinedExpr::PrettyFunctionNoVirtual, MD);
Out << Str;
@@ -3072,7 +3040,7 @@
case VTableComponent::CK_DeletingDtorPointer: {
const CXXDestructorDecl *DD = Component.getDestructorDecl();
- Out << DD->getQualifiedNameAsString();
+ DD->printQualifiedName(Out);
Out << "() [scalar deleting]";
if (DD->isPure())
@@ -3124,7 +3092,11 @@
ThunkInfoVectorTy ThunksVector = Thunks[MD];
std::stable_sort(ThunksVector.begin(), ThunksVector.end(),
- MicrosoftThunkInfoStableSortComparator());
+ [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ // Keep different thunks with the same adjustments in the order they
+ // were put into the vector.
+ return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
+ });
Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
@@ -3140,123 +3112,169 @@
Out << '\n';
}
}
-}
+
+ Out.flush();
}
-void MicrosoftVTableContext::enumerateVFPtrs(
- const CXXRecordDecl *MostDerivedClass,
- const ASTRecordLayout &MostDerivedClassLayout, BaseSubobject Base,
- const CXXRecordDecl *LastVBase,
- const VFPtrInfo::BasePath &PathFromCompleteClass,
- BasesSetVectorTy &VisitedVBases,
- VFPtrListTy &Result) {
- const CXXRecordDecl *CurrentClass = Base.getBase();
- CharUnits OffsetInCompleteClass = Base.getBaseOffset();
- const ASTRecordLayout &CurrentClassLayout =
- Context.getASTRecordLayout(CurrentClass);
-
- if (CurrentClassLayout.hasOwnVFPtr()) {
- if (LastVBase) {
- uint64_t VBIndex = getVBTableIndex(MostDerivedClass, LastVBase);
- assert(VBIndex > 0 && "vbases must have vbindex!");
- CharUnits VFPtrOffset =
- OffsetInCompleteClass -
- MostDerivedClassLayout.getVBaseClassOffset(LastVBase);
- Result.push_back(VFPtrInfo(VBIndex, LastVBase, VFPtrOffset,
- PathFromCompleteClass, OffsetInCompleteClass));
- } else {
- Result.push_back(VFPtrInfo(OffsetInCompleteClass, PathFromCompleteClass));
- }
+static bool setsIntersect(const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &A,
+ const llvm::ArrayRef<const CXXRecordDecl *> &B) {
+ for (llvm::ArrayRef<const CXXRecordDecl *>::iterator I = B.begin(),
+ E = B.end();
+ I != E; ++I) {
+ if (A.count(*I))
+ return true;
}
+ return false;
+}
- for (CXXRecordDecl::base_class_const_iterator I = CurrentClass->bases_begin(),
- E = CurrentClass->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+static bool rebucketPaths(VPtrInfoVector &Paths);
- CharUnits NextBaseOffset;
- const CXXRecordDecl *NextLastVBase;
- if (I->isVirtual()) {
- if (!VisitedVBases.insert(BaseDecl))
+/// Produces MSVC-compatible vbtable data. The symbols produced by this
+/// algorithm match those produced by MSVC 2012 and newer, which is different
+/// from MSVC 2010.
+///
+/// MSVC 2012 appears to minimize the vbtable names using the following
+/// algorithm. First, walk the class hierarchy in the usual order, depth first,
+/// left to right, to find all of the subobjects which contain a vbptr field.
+/// Visiting each class node yields a list of inheritance paths to vbptrs. Each
+/// record with a vbptr creates an initially empty path.
+///
+/// To combine paths from child nodes, the paths are compared to check for
+/// ambiguity. Paths are "ambiguous" if multiple paths have the same set of
+/// components in the same order. Each group of ambiguous paths is extended by
+/// appending the class of the base from which it came. If the current class
+/// node produced an ambiguous path, its path is extended with the current class.
+/// After extending paths, MSVC again checks for ambiguity, and extends any
+/// ambiguous path which wasn't already extended. Because each node yields an
+/// unambiguous set of paths, MSVC doesn't need to extend any path more than once
+/// to produce an unambiguous set of paths.
+///
+/// TODO: Presumably vftables use the same algorithm.
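A concrete, made-up hierarchy may help picture the ambiguity step:

    struct V {};
    struct B : virtual V {};   // B has its own vbptr
    struct C : virtual V {};   // C has its own vbptr
    struct D : B, C {};        // D reaches one vbptr through B and one through C

Both inherited paths start out empty and therefore collide, so each is extended with the direct base it came through, giving D two distinct vbtables: one for the B subobject and one for the C subobject.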
+void MicrosoftVTableContext::computeVTablePaths(bool ForVBTables,
+ const CXXRecordDecl *RD,
+ VPtrInfoVector &Paths) {
+ assert(Paths.empty());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Base case: this subobject has its own vptr.
+ if (ForVBTables ? Layout.hasOwnVBPtr() : Layout.hasOwnVFPtr())
+ Paths.push_back(new VPtrInfo(RD));
+
+ // Recursive case: get all the vbtables from our bases and remove anything
+ // that shares a virtual base.
+ llvm::SmallPtrSet<const CXXRecordDecl*, 4> VBasesSeen;
+ for (const auto &B : RD->bases()) {
+ const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl();
+ if (B.isVirtual() && VBasesSeen.count(Base))
+ continue;
+
+ if (!Base->isDynamicClass())
+ continue;
+
+ const VPtrInfoVector &BasePaths =
+ ForVBTables ? enumerateVBTables(Base) : getVFPtrOffsets(Base);
+
+ for (VPtrInfoVector::const_iterator II = BasePaths.begin(),
+ EE = BasePaths.end();
+ II != EE; ++II) {
+ VPtrInfo *BaseInfo = *II;
+
+ // Don't include the path if it goes through a virtual base that we've
+ // already included.
+ if (setsIntersect(VBasesSeen, BaseInfo->ContainingVBases))
continue;
- NextBaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
- NextLastVBase = BaseDecl;
- } else {
- NextBaseOffset = OffsetInCompleteClass +
- CurrentClassLayout.getBaseClassOffset(BaseDecl);
- NextLastVBase = LastVBase;
+
+ // Copy the path and adjust it as necessary.
+ VPtrInfo *P = new VPtrInfo(*BaseInfo);
+
+ // We mangle Base into the path if the path would've been ambiguous and it
+ // wasn't already extended with Base.
+ if (P->MangledPath.empty() || P->MangledPath.back() != Base)
+ P->NextBaseToMangle = Base;
+
+ // Keep track of the full path.
+ // FIXME: Why do we need this?
+ P->PathToBaseWithVPtr.insert(P->PathToBaseWithVPtr.begin(), Base);
+
+ // Keep track of which derived class ultimately uses the vtable, and what
+ // the full adjustment is from the MDC to this vtable. The adjustment is
+ // captured by an optional vbase and a non-virtual offset.
+ if (Base == (ForVBTables ? Layout.getBaseSharingVBPtr()
+ : Layout.getPrimaryBase()))
+ P->ReusingBase = RD;
+ if (B.isVirtual())
+ P->ContainingVBases.push_back(Base);
+ else if (P->ContainingVBases.empty())
+ P->NonVirtualOffset += Layout.getBaseClassOffset(Base);
+
+ // Update the full offset in the MDC.
+ P->FullOffsetInMDC = P->NonVirtualOffset;
+ if (const CXXRecordDecl *VB = P->getVBaseWithVPtr())
+ P->FullOffsetInMDC += Layout.getVBaseClassOffset(VB);
+
+ Paths.push_back(P);
}
- VFPtrInfo::BasePath NewPath = PathFromCompleteClass;
- NewPath.push_back(BaseDecl);
- BaseSubobject NextBase(BaseDecl, NextBaseOffset);
+ if (B.isVirtual())
+ VBasesSeen.insert(Base);
- enumerateVFPtrs(MostDerivedClass, MostDerivedClassLayout, NextBase,
- NextLastVBase, NewPath, VisitedVBases, Result);
+ // After visiting any direct base, we've transitively visited all of its
+ // morally virtual bases.
+ for (const auto &VB : Base->vbases())
+ VBasesSeen.insert(VB.getType()->getAsCXXRecordDecl());
}
+
+ // Sort the paths into buckets, and if any of them are ambiguous, extend all
+ // paths in ambiguous buckets.
+ bool Changed = true;
+ while (Changed)
+ Changed = rebucketPaths(Paths);
}
-/// CalculatePathToMangle - Calculate the subset of records that should be used
-/// to mangle the vftable for the given vfptr.
-/// Should only be called if a class has multiple vftables.
-static void
-CalculatePathToMangle(const CXXRecordDecl *RD, VFPtrInfo &VFPtr) {
- // FIXME: In some rare cases this code produces a slightly incorrect mangling.
- // It's very likely that the vbtable mangling code can be adjusted to mangle
- // both vftables and vbtables correctly.
-
- VFPtrInfo::BasePath &FullPath = VFPtr.PathToBaseWithVFPtr;
- if (FullPath.empty()) {
- // Mangle the class's own vftable.
- assert(RD->getNumVBases() &&
- "Something's wrong: if the most derived "
- "class has more than one vftable, it can only have its own "
- "vftable if it has vbases");
- VFPtr.PathToMangle.push_back(RD);
- return;
+static bool extendPath(VPtrInfo *P) {
+ if (P->NextBaseToMangle) {
+ P->MangledPath.push_back(P->NextBaseToMangle);
+ P->NextBaseToMangle = 0; // Prevent the path from being extended twice.
+ return true;
}
-
- unsigned Begin = 0;
-
- // First, skip all the bases before the vbase.
- if (VFPtr.LastVBase) {
- while (FullPath[Begin] != VFPtr.LastVBase) {
- Begin++;
- assert(Begin < FullPath.size());
- }
- }
-
- // Then, put the rest of the base path in the reverse order.
- for (unsigned I = FullPath.size(); I != Begin; --I) {
- const CXXRecordDecl *CurBase = FullPath[I - 1],
- *ItsBase = (I == 1) ? RD : FullPath[I - 2];
- bool BaseIsVirtual = false;
- for (CXXRecordDecl::base_class_const_iterator J = ItsBase->bases_begin(),
- F = ItsBase->bases_end(); J != F; ++J) {
- if (J->getType()->getAsCXXRecordDecl() == CurBase) {
- BaseIsVirtual = J->isVirtual();
- break;
- }
- }
-
- // Should skip the current base if it is a non-virtual base with no siblings.
- if (BaseIsVirtual || ItsBase->getNumBases() != 1)
- VFPtr.PathToMangle.push_back(CurBase);
- }
+ return false;
}
-void MicrosoftVTableContext::enumerateVFPtrs(
- const CXXRecordDecl *ForClass,
- MicrosoftVTableContext::VFPtrListTy &Result) {
- Result.clear();
- const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(ForClass);
- BasesSetVectorTy VisitedVBases;
- enumerateVFPtrs(ForClass, ClassLayout,
- BaseSubobject(ForClass, CharUnits::Zero()), 0,
- VFPtrInfo::BasePath(), VisitedVBases, Result);
- if (Result.size() > 1) {
- for (unsigned I = 0, E = Result.size(); I != E; ++I)
- CalculatePathToMangle(ForClass, Result[I]);
+static bool rebucketPaths(VPtrInfoVector &Paths) {
+ // What we're essentially doing here is bucketing together ambiguous paths.
+ // Any bucket with more than one path in it gets extended by NextBase, which
+ // is usually the direct base of the class that inherited the vbptr. This code uses a
+ // sorted vector to implement a multiset to form the buckets. Note that the
+ // ordering is based on pointers, but it doesn't change our output order. The
+ // current algorithm is designed to match MSVC 2012's names.
+ VPtrInfoVector PathsSorted(Paths);
+ std::sort(PathsSorted.begin(), PathsSorted.end(),
+ [](const VPtrInfo *LHS, const VPtrInfo *RHS) {
+ return LHS->MangledPath < RHS->MangledPath;
+ });
+ bool Changed = false;
+ for (size_t I = 0, E = PathsSorted.size(); I != E;) {
+ // Scan forward to find the end of the bucket.
+ size_t BucketStart = I;
+ do {
+ ++I;
+ } while (I != E && PathsSorted[BucketStart]->MangledPath ==
+ PathsSorted[I]->MangledPath);
+
+ // If this bucket has multiple paths, extend them all.
+ if (I - BucketStart > 1) {
+ for (size_t II = BucketStart; II != I; ++II)
+ Changed |= extendPath(PathsSorted[II]);
+ assert(Changed && "no paths were extended to fix ambiguity");
+ }
}
+ return Changed;
+}
+
+MicrosoftVTableContext::~MicrosoftVTableContext() {
+ llvm::DeleteContainerSeconds(VFPtrLocations);
+ llvm::DeleteContainerSeconds(VFTableLayouts);
+ llvm::DeleteContainerSeconds(VBaseInfo);
}
void MicrosoftVTableContext::computeVTableRelatedInformation(
@@ -3269,24 +3287,31 @@
const VTableLayout::AddressPointsMapTy EmptyAddressPointsMap;
- VFPtrListTy &VFPtrs = VFPtrLocations[RD];
- enumerateVFPtrs(RD, VFPtrs);
+ VPtrInfoVector *VFPtrs = new VPtrInfoVector();
+ computeVTablePaths(/*ForVBTables=*/false, RD, *VFPtrs);
+ VFPtrLocations[RD] = VFPtrs;
MethodVFTableLocationsTy NewMethodLocations;
- for (VFPtrListTy::iterator I = VFPtrs.begin(), E = VFPtrs.end();
+ for (VPtrInfoVector::iterator I = VFPtrs->begin(), E = VFPtrs->end();
I != E; ++I) {
VFTableBuilder Builder(*this, RD, *I);
- VFTableIdTy id(RD, I->VFPtrFullOffset);
+ VFTableIdTy id(RD, (*I)->FullOffsetInMDC);
assert(VFTableLayouts.count(id) == 0);
SmallVector<VTableLayout::VTableThunkTy, 1> VTableThunks(
Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
VFTableLayouts[id] = new VTableLayout(
Builder.getNumVTableComponents(), Builder.vtable_component_begin(),
VTableThunks.size(), VTableThunks.data(), EmptyAddressPointsMap, true);
- NewMethodLocations.insert(Builder.vtable_indices_begin(),
- Builder.vtable_indices_end());
Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
+
+ for (const auto &Loc : Builder.vtable_locations()) {
+ GlobalDecl GD = Loc.first;
+ MethodVFTableLocation NewLoc = Loc.second;
+ auto M = NewMethodLocations.find(GD);
+ if (M == NewMethodLocations.end() || NewLoc < M->second)
+ NewMethodLocations[GD] = NewLoc;
+ }
}
MethodVFTableLocations.insert(NewMethodLocations.begin(),
@@ -3324,8 +3349,10 @@
// Print the vtable indices for all the member functions.
if (!IndicesMap.empty()) {
Out << "VFTable indices for ";
- Out << "'" << RD->getQualifiedNameAsString();
- Out << "' (" << IndicesMap.size() << " entries).\n";
+ Out << "'";
+ RD->printQualifiedName(Out);
+ Out << "' (" << IndicesMap.size()
+ << (IndicesMap.size() == 1 ? " entry" : " entries") << ").\n";
CharUnits LastVFPtrOffset = CharUnits::fromQuantity(-1);
uint64_t LastVBIndex = 0;
@@ -3352,49 +3379,66 @@
}
Out << '\n';
}
+
+ Out.flush();
}
-void MicrosoftVTableContext::computeVBTableRelatedInformation(
+const VirtualBaseInfo *MicrosoftVTableContext::computeVBTableRelatedInformation(
const CXXRecordDecl *RD) {
- if (ComputedVBTableIndices.count(RD))
- return;
- ComputedVBTableIndices.insert(RD);
+ VirtualBaseInfo *VBI;
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- BasesSetVectorTy VisitedBases;
+ {
+ // Get or create a VBI for RD. Don't hold a reference to the DenseMap cell,
+ // as it may be modified and rehashed under us.
+ VirtualBaseInfo *&Entry = VBaseInfo[RD];
+ if (Entry)
+ return Entry;
+ Entry = VBI = new VirtualBaseInfo();
+ }
+
+ computeVTablePaths(/*ForVBTables=*/true, RD, VBI->VBPtrPaths);
// First, see if the Derived class shared the vbptr with a non-virtual base.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
if (const CXXRecordDecl *VBPtrBase = Layout.getBaseSharingVBPtr()) {
- // If the Derived class shares the vbptr with a non-virtual base,
- // it inherits its vbase indices.
- computeVBTableRelatedInformation(VBPtrBase);
- for (CXXRecordDecl::base_class_const_iterator I = VBPtrBase->vbases_begin(),
- E = VBPtrBase->vbases_end(); I != E; ++I) {
- const CXXRecordDecl *SubVBase = I->getType()->getAsCXXRecordDecl();
- assert(VBTableIndices.count(ClassPairTy(VBPtrBase, SubVBase)));
- VBTableIndices[ClassPairTy(RD, SubVBase)] =
- VBTableIndices[ClassPairTy(VBPtrBase, SubVBase)];
- VisitedBases.insert(SubVBase);
- }
+ // If the Derived class shares the vbptr with a non-virtual base, the shared
+ // virtual bases come first so that the layout is the same.
+ const VirtualBaseInfo *BaseInfo =
+ computeVBTableRelatedInformation(VBPtrBase);
+ VBI->VBTableIndices.insert(BaseInfo->VBTableIndices.begin(),
+ BaseInfo->VBTableIndices.end());
}
// New vbases are added to the end of the vbtable.
// Skip the self entry and vbases visited in the non-virtual base, if any.
- unsigned VBTableIndex = 1 + VisitedBases.size();
- for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
- E = RD->vbases_end(); I != E; ++I) {
- const CXXRecordDecl *CurVBase = I->getType()->getAsCXXRecordDecl();
- if (VisitedBases.insert(CurVBase))
- VBTableIndices[ClassPairTy(RD, CurVBase)] = VBTableIndex++;
+ unsigned VBTableIndex = 1 + VBI->VBTableIndices.size();
+ for (const auto &VB : RD->vbases()) {
+ const CXXRecordDecl *CurVBase = VB.getType()->getAsCXXRecordDecl();
+ if (!VBI->VBTableIndices.count(CurVBase))
+ VBI->VBTableIndices[CurVBase] = VBTableIndex++;
}
+
+ return VBI;
}
-const MicrosoftVTableContext::VFPtrListTy &
+unsigned MicrosoftVTableContext::getVBTableIndex(const CXXRecordDecl *Derived,
+ const CXXRecordDecl *VBase) {
+ const VirtualBaseInfo *VBInfo = computeVBTableRelatedInformation(Derived);
+ assert(VBInfo->VBTableIndices.count(VBase));
+ return VBInfo->VBTableIndices.find(VBase)->second;
+}
+
+const VPtrInfoVector &
+MicrosoftVTableContext::enumerateVBTables(const CXXRecordDecl *RD) {
+ return computeVBTableRelatedInformation(RD)->VBPtrPaths;
+}
+
+const VPtrInfoVector &
MicrosoftVTableContext::getVFPtrOffsets(const CXXRecordDecl *RD) {
computeVTableRelatedInformation(RD);
assert(VFPtrLocations.count(RD) && "Couldn't find vfptr locations");
- return VFPtrLocations[RD];
+ return *VFPtrLocations[RD];
}
const VTableLayout &